1
0
mirror of https://github.com/ellmau/adf-obdd.git synced 2025-12-19 09:29:36 +01:00

Added some parallelism ideas, none of which have been working

This commit is contained in:
Stefan Ellmauthaler 2022-06-01 15:45:33 +02:00
parent be68af7d05
commit 31e02c6de4
Failed to extract signature
8 changed files with 391 additions and 114 deletions

85
Cargo.lock generated
View File

@ -14,6 +14,7 @@ dependencies = [
"nom", "nom",
"quickcheck", "quickcheck",
"quickcheck_macros", "quickcheck_macros",
"rayon",
"serde", "serde",
"serde_json", "serde_json",
"test-log", "test-log",
@ -172,6 +173,41 @@ dependencies = [
"os_str_bytes", "os_str_bytes",
] ]
[[package]]
name = "crossbeam-channel"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"lazy_static",
"memoffset",
"scopeguard",
]
[[package]] [[package]]
name = "crossbeam-utils" name = "crossbeam-utils"
version = "0.8.7" version = "0.8.7"
@ -428,6 +464,15 @@ version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]] [[package]]
name = "minimal-lexical" name = "minimal-lexical"
version = "0.2.1" version = "0.2.1"
@ -459,6 +504,16 @@ dependencies = [
"autocfg", "autocfg",
] ]
[[package]]
name = "num_cpus"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
dependencies = [
"hermit-abi",
"libc",
]
[[package]] [[package]]
name = "once_cell" name = "once_cell"
version = "1.9.0" version = "1.9.0"
@ -630,6 +685,30 @@ dependencies = [
"rand_core 0.5.1", "rand_core 0.5.1",
] ]
[[package]]
name = "rayon"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d"
dependencies = [
"autocfg",
"crossbeam-deque",
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils",
"num_cpus",
]
[[package]] [[package]]
name = "redox_syscall" name = "redox_syscall"
version = "0.2.11" version = "0.2.11"
@ -686,6 +765,12 @@ dependencies = [
"winapi-util", "winapi-util",
] ]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]] [[package]]
name = "serde" name = "serde"
version = "1.0.137" version = "1.0.137"

View File

@ -216,7 +216,7 @@ impl App {
} }
if self.stable { if self.stable {
for model in naive_adf.stable() { for model in naive_adf.stable_par() {
print!("{}", printer.print_interpretation(&model)); print!("{}", printer.print_interpretation(&model));
} }
} }

View File

@ -30,6 +30,7 @@ serde = { version = "1.0", features = ["derive","rc"] }
serde_json = "1.0" serde_json = "1.0"
biodivine-lib-bdd = "0.3.0" biodivine-lib-bdd = "0.3.0"
derivative = "2.2.0" derivative = "2.2.0"
rayon = "1.5.3"
[dev-dependencies] [dev-dependencies]
test-log = "0.2" test-log = "0.2"

View File

@ -5,6 +5,9 @@ This module describes the abstract dialectical framework.
- computing fixpoints - computing fixpoints
*/ */
use std::{marker::Sync, sync::Mutex};
use rayon::prelude::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
@ -49,9 +52,20 @@ impl Adf {
parser.dict_rc_refcell(), parser.dict_rc_refcell(),
), ),
bdd: Bdd::new(), bdd: Bdd::new(),
ac: vec![Term(0); parser.namelist_rc_refcell().as_ref().borrow().len()], ac: vec![
Term(0);
parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.len()
],
}; };
(0..parser.namelist_rc_refcell().borrow().len()) (0..parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.len())
.into_iter() .into_iter()
.for_each(|value| { .for_each(|value| {
log::trace!("adding variable {}", Var(value)); log::trace!("adding variable {}", Var(value));
@ -188,7 +202,7 @@ impl Adf {
result result
} }
fn grounded_internal(&mut self, interpretation: &[Term]) -> Vec<Term> { fn grounded_internal(&self, interpretation: &[Term]) -> Vec<Term> {
let mut t_vals: usize = interpretation let mut t_vals: usize = interpretation
.iter() .iter()
.filter(|elem| elem.is_truth_value()) .filter(|elem| elem.is_truth_value())
@ -266,6 +280,43 @@ impl Adf {
.map(|(int, _grd)| int) .map(|(int, _grd)| int)
} }
/// Testing some parallel stable computation
// NOTE(review): experimental parallel variant of the stable-model computation.
// Candidates come from `TwoValuedInterpretationsIterator` (sequential) and are
// checked concurrently via rayon. `par_bridge()` already yields a parallel
// iterator, so the following `.into_par_iter()` looks redundant — TODO confirm.
pub fn stable_par(&mut self) -> Vec<Vec<Term>> {
// The grounded interpretation bounds every stable model candidate.
let grounded = self.grounded();
TwoValuedInterpretationsIterator::new(&grounded)
.par_bridge()
.into_par_iter()
.map(|interpretation| {
// Build the reduct: clone the acceptance conditions and restrict,
// for every statement the candidate decides negatively (a truth
// value that is not true), that variable to `false` in the roBDD.
let mut interpr = self.ac.clone();
for ac in interpr.iter_mut() {
*ac = interpretation
.iter()
.enumerate()
.fold(*ac, |acc, (var, term)| {
if term.is_truth_value() && !term.is_true() {
self.bdd.restrict(acc, Var(var), false)
} else {
acc
}
});
}
// Grounded semantics of the reduct; the candidate is stable iff
// it coincides with this check (stability-via-reduct test).
let grounded_check = self.grounded_internal(&interpr);
log::debug!(
"grounded candidate\n{:?}\n{:?}",
interpretation,
grounded_check
);
(interpretation, grounded_check)
})
// Keep only candidates matching the grounded check pointwise;
// `compare_inf` presumably compares in the information ordering —
// TODO confirm against its definition.
.filter(|(int, grd)| {
int.iter()
.zip(grd.iter())
.all(|(it, gr)| it.compare_inf(gr))
})
.map(|(int, _grd)| int)
.collect::<Vec<_>>()
}
/// Computes the stable models. /// Computes the stable models.
/// Returns a vector with all stable models, using a single-formula representation in biodivine to enumerate the possible models. /// Returns a vector with all stable models, using a single-formula representation in biodivine to enumerate the possible models.
/// Note that the biodivine adf needs to be the one which instantiated the adf (if applicable). /// Note that the biodivine adf needs to be the one which instantiated the adf (if applicable).
@ -407,7 +458,7 @@ impl Adf {
fn two_val_model_counts<H>(&mut self, interpr: &[Term], heuristic: H) -> Vec<Vec<Term>> fn two_val_model_counts<H>(&mut self, interpr: &[Term], heuristic: H) -> Vec<Vec<Term>>
where where
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy, H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy + Sync,
{ {
self.two_val_model_counts_logic(interpr, &vec![Term::UND; interpr.len()], 0, heuristic) self.two_val_model_counts_logic(interpr, &vec![Term::UND; interpr.len()], 0, heuristic)
} }
@ -461,18 +512,18 @@ impl Adf {
} }
fn two_val_model_counts_logic<H>( fn two_val_model_counts_logic<H>(
&mut self, &self,
interpr: &[Term], interpr: &[Term],
will_be: &[Term], will_be: &[Term],
depth: usize, depth: usize,
heuristic: H, heuristic: H,
) -> Vec<Vec<Term>> ) -> Vec<Vec<Term>>
where where
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy, H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy + Sync,
{ {
log::debug!("two_val_model_recursion_depth: {}/{}", depth, interpr.len()); log::debug!("two_val_model_recursion_depth: {}/{}", depth, interpr.len());
if let Some((idx, ac)) = interpr if let Some((idx, ac)) = interpr
.iter() .par_iter()
.enumerate() .enumerate()
.filter(|(idx, val)| !(val.is_truth_value() || will_be[*idx].is_truth_value())) .filter(|(idx, val)| !(val.is_truth_value() || will_be[*idx].is_truth_value()))
.min_by(|(idx_a, val_a), (idx_b, val_b)| { .min_by(|(idx_a, val_a), (idx_b, val_b)| {
@ -484,7 +535,8 @@ impl Adf {
) )
}) })
{ {
let mut result = Vec::new(); // let mut result = Vec::new();
let result: Mutex<Vec<Vec<Term>>> = Mutex::new(Vec::new());
let check_models = !self.bdd.paths(*ac, true).more_models(); let check_models = !self.bdd.paths(*ac, true).more_models();
log::trace!( log::trace!(
"Identified Var({}) with ac {:?} to be {}", "Identified Var({}) with ac {:?} to be {}",
@ -521,17 +573,27 @@ impl Adf {
new_int[idx] = if check_models { Term::TOP } else { Term::BOT }; new_int[idx] = if check_models { Term::TOP } else { Term::BOT };
let upd_int = self.update_interpretation_fixpoint(&new_int); let upd_int = self.update_interpretation_fixpoint(&new_int);
if self.check_consistency(&upd_int, will_be) { if self.check_consistency(&upd_int, will_be) {
result.append(&mut self.two_val_model_counts_logic( let mut append = self.two_val_model_counts_logic(
&upd_int, &upd_int,
will_be, will_be,
depth + 1, depth + 1,
heuristic, heuristic,
)); );
result
.lock()
.expect("Failed to lock mutex for results")
.append(&mut append);
} }
} }
res res
}); });
log::trace!("results found so far:{}", result.len()); log::trace!(
"results found so far:{}",
result
.lock()
.expect("Failed to lock mutex for results")
.len()
);
// checked one alternative, we can now conclude that only the other option may work // checked one alternative, we can now conclude that only the other option may work
log::debug!("checked one alternative, concluding the other value"); log::debug!("checked one alternative, concluding the other value");
let new_int = interpr let new_int = interpr
@ -546,15 +608,23 @@ impl Adf {
if new_int[idx].no_inf_inconsistency(&upd_int[idx]) { if new_int[idx].no_inf_inconsistency(&upd_int[idx]) {
let mut must_be_new = will_be.to_vec(); let mut must_be_new = will_be.to_vec();
must_be_new[idx] = new_int[idx]; must_be_new[idx] = new_int[idx];
result.append(&mut self.two_val_model_counts_logic( let mut append = self.two_val_model_counts_logic(
&upd_int, &upd_int,
&must_be_new, &must_be_new,
depth + 1, depth + 1,
heuristic, heuristic,
)); );
}
}
result result
.lock()
.expect("Failed to lock mutex for results")
.append(&mut append);
}
}
let x = result
.lock()
.expect("Failed to lock mutex for results")
.clone();
x
} else { } else {
// filter has created empty iterator // filter has created empty iterator
let concluded = interpr let concluded = interpr
@ -578,7 +648,7 @@ impl Adf {
} }
} }
fn update_interpretation_fixpoint(&mut self, interpretation: &[Term]) -> Vec<Term> { fn update_interpretation_fixpoint(&self, interpretation: &[Term]) -> Vec<Term> {
let mut cur_int = interpretation.to_vec(); let mut cur_int = interpretation.to_vec();
loop { loop {
let new_int = self.update_interpretation(interpretation); let new_int = self.update_interpretation(interpretation);
@ -590,11 +660,11 @@ impl Adf {
} }
} }
fn update_interpretation(&mut self, interpretation: &[Term]) -> Vec<Term> { fn update_interpretation(&self, interpretation: &[Term]) -> Vec<Term> {
self.apply_interpretation(interpretation, interpretation) self.apply_interpretation(interpretation, interpretation)
} }
fn apply_interpretation(&mut self, ac: &[Term], interpretation: &[Term]) -> Vec<Term> { fn apply_interpretation(&self, ac: &[Term], interpretation: &[Term]) -> Vec<Term> {
ac.iter() ac.iter()
.map(|ac| { .map(|ac| {
interpretation interpretation
@ -611,7 +681,7 @@ impl Adf {
.collect::<Vec<Term>>() .collect::<Vec<Term>>()
} }
fn check_consistency(&mut self, interpretation: &[Term], will_be: &[Term]) -> bool { fn check_consistency(&self, interpretation: &[Term], will_be: &[Term]) -> bool {
interpretation interpretation
.iter() .iter()
.zip(will_be.iter()) .zip(will_be.iter())
@ -712,11 +782,41 @@ mod test {
parser.parse()(input).unwrap(); parser.parse()(input).unwrap();
let adf = Adf::from_parser(&parser); let adf = Adf::from_parser(&parser);
assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a"); assert_eq!(
assert_eq!(adf.ordering.names().as_ref().borrow()[1], "c"); adf.ordering
assert_eq!(adf.ordering.names().as_ref().borrow()[2], "b"); .names()
assert_eq!(adf.ordering.names().as_ref().borrow()[3], "e"); .read()
assert_eq!(adf.ordering.names().as_ref().borrow()[4], "d"); .expect("Failed to gain read lock")[0],
"a"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[1],
"c"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[2],
"b"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[3],
"e"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[4],
"d"
);
assert_eq!(adf.ac, vec![Term(4), Term(2), Term(7), Term(15), Term(12)]); assert_eq!(adf.ac, vec![Term(4), Term(2), Term(7), Term(15), Term(12)]);
@ -727,11 +827,41 @@ mod test {
parser.varsort_alphanum(); parser.varsort_alphanum();
let adf = Adf::from_parser(&parser); let adf = Adf::from_parser(&parser);
assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a"); assert_eq!(
assert_eq!(adf.ordering.names().as_ref().borrow()[1], "b"); adf.ordering
assert_eq!(adf.ordering.names().as_ref().borrow()[2], "c"); .names()
assert_eq!(adf.ordering.names().as_ref().borrow()[3], "d"); .read()
assert_eq!(adf.ordering.names().as_ref().borrow()[4], "e"); .expect("Failed to gain read lock")[0],
"a"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[1],
"b"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[2],
"c"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[3],
"d"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[4],
"e"
);
assert_eq!(adf.ac, vec![Term(3), Term(7), Term(2), Term(11), Term(13)]); assert_eq!(adf.ac, vec![Term(3), Term(7), Term(2), Term(11), Term(13)]);
} }

View File

@ -40,7 +40,11 @@ impl Adf {
pub fn from_parser(parser: &AdfParser) -> Self { pub fn from_parser(parser: &AdfParser) -> Self {
log::info!("[Start] instantiating BDD"); log::info!("[Start] instantiating BDD");
let mut bdd_var_builder = biodivine_lib_bdd::BddVariableSetBuilder::new(); let mut bdd_var_builder = biodivine_lib_bdd::BddVariableSetBuilder::new();
let namelist = parser.namelist_rc_refcell().as_ref().borrow().clone(); let namelist = parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.clone();
let slice_vec: Vec<&str> = namelist.iter().map(<_>::as_ref).collect(); let slice_vec: Vec<&str> = namelist.iter().map(<_>::as_ref).collect();
bdd_var_builder.make_variables(&slice_vec); bdd_var_builder.make_variables(&slice_vec);
let bdd_variables = bdd_var_builder.build(); let bdd_variables = bdd_var_builder.build();
@ -51,7 +55,11 @@ impl Adf {
), ),
ac: vec![ ac: vec![
bdd_variables.mk_false(); bdd_variables.mk_false();
parser.namelist_rc_refcell().as_ref().borrow().len() parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.len()
], ],
vars: bdd_variables.variables(), vars: bdd_variables.variables(),
varset: bdd_variables, varset: bdd_variables,

View File

@ -2,27 +2,33 @@
use super::{Term, Var}; use super::{Term, Var};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc}; use std::{
cell::RefCell,
collections::HashMap,
fmt::Display,
rc::Rc,
sync::{Arc, RwLock},
};
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub(crate) struct VarContainer { pub(crate) struct VarContainer {
names: Rc<RefCell<Vec<String>>>, names: Arc<RwLock<Vec<String>>>,
mapping: Rc<RefCell<HashMap<String, usize>>>, mapping: Arc<RwLock<HashMap<String, usize>>>,
} }
impl Default for VarContainer { impl Default for VarContainer {
fn default() -> Self { fn default() -> Self {
VarContainer { VarContainer {
names: Rc::new(RefCell::new(Vec::new())), names: Arc::new(RwLock::new(Vec::new())),
mapping: Rc::new(RefCell::new(HashMap::new())), mapping: Arc::new(RwLock::new(HashMap::new())),
} }
} }
} }
impl VarContainer { impl VarContainer {
pub fn from_parser( pub fn from_parser(
names: Rc<RefCell<Vec<String>>>, names: Arc<RwLock<Vec<String>>>,
mapping: Rc<RefCell<HashMap<String, usize>>>, mapping: Arc<RwLock<HashMap<String, usize>>>,
) -> VarContainer { ) -> VarContainer {
VarContainer { names, mapping } VarContainer { names, mapping }
} }
@ -35,16 +41,24 @@ impl VarContainer {
} }
pub fn variable(&self, name: &str) -> Option<Var> { pub fn variable(&self, name: &str) -> Option<Var> {
self.mapping.borrow().get(name).map(|val| Var(*val)) self.mapping
.read()
.expect("Failed to gain read lock")
.get(name)
.map(|val| Var(*val))
} }
pub fn name(&self, var: Var) -> Option<String> { pub fn name(&self, var: Var) -> Option<String> {
self.names.borrow().get(var.value()).cloned() self.names
.read()
.expect("Failed to gain read lock")
.get(var.value())
.cloned()
} }
#[allow(dead_code)] #[allow(dead_code)]
pub fn names(&self) -> Rc<RefCell<Vec<String>>> { pub fn names(&self) -> Arc<RwLock<Vec<String>>> {
Rc::clone(&self.names) Arc::clone(&self.names)
} }
} }
/// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations. /// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations.

View File

@ -3,30 +3,36 @@
pub mod vectorize; pub mod vectorize;
use crate::datatypes::*; use crate::datatypes::*;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
use std::collections::HashSet; use std::collections::HashSet;
use std::rc::Rc; use std::rc::Rc;
use std::sync::{Arc, RwLock};
use std::{cell::RefCell, cmp::min, collections::HashMap, fmt::Display}; use std::{cell::RefCell, cmp::min, collections::HashMap, fmt::Display};
const R_FAIL: &str = "Failed to gain read lock";
const W_FAIL: &str = "Failed to gain write lock";
/// Contains the data of (possibly) multiple roBDDs, managed over one collection of nodes. /// Contains the data of (possibly) multiple roBDDs, managed over one collection of nodes.
/// It has a couple of methods to instantiate, update, and query properties on a given roBDD. /// It has a couple of methods to instantiate, update, and query properties on a given roBDD.
/// Each roBDD is identified by its corresponding [`Term`], which implicitly identifies the root node of a roBDD. /// Each roBDD is identified by its corresponding [`Term`], which implicitly identifies the root node of a roBDD.
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub struct Bdd { pub struct Bdd {
nodes: Rc<RefCell<Vec<BddNode>>>, nodes: Arc<RwLock<Vec<BddNode>>>,
#[cfg(feature = "variablelist")] #[cfg(feature = "variablelist")]
#[serde(skip)] #[serde(skip)]
var_deps: Rc<RefCell<Vec<HashSet<Var>>>>, var_deps: Arc<RwLock<Vec<HashSet<Var>>>>,
//#[serde(with = "vectorize")] //#[serde(with = "vectorize")]
#[serde(skip)] #[serde(skip)]
cache: Rc<RefCell<HashMap<BddNode, Term>>>, cache: Arc<RwLock<HashMap<BddNode, Term>>>,
#[serde(skip, default = "Bdd::default_count_cache")] #[serde(skip, default = "Bdd::default_count_cache")]
count_cache: Rc<RefCell<HashMap<Term, CountNode>>>, count_cache: Arc<RwLock<HashMap<Term, CountNode>>>,
} }
impl Display for Bdd { impl Display for Bdd {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, " ")?; writeln!(f, " ")?;
for (idx, elem) in self.nodes.borrow().iter().enumerate() { // for (idx, elem) in self.nodes.borrow().iter().enumerate() {
for (idx, elem) in self.nodes.read().expect(R_FAIL).iter().enumerate() {
writeln!(f, "{} {}", idx, *elem)?; writeln!(f, "{} {}", idx, *elem)?;
} }
Ok(()) Ok(())
@ -56,26 +62,28 @@ impl Bdd {
#[cfg(feature = "adhoccounting")] #[cfg(feature = "adhoccounting")]
{ {
let result = Self { let result = Self {
nodes: Rc::new(RefCell::new(vec![BddNode::bot_node(), BddNode::top_node()])), nodes: Arc::new(RwLock::new(vec![BddNode::bot_node(), BddNode::top_node()])),
#[cfg(feature = "variablelist")] #[cfg(feature = "variablelist")]
var_deps: Rc::new(RefCell::new(vec![HashSet::new(), HashSet::new()])), var_deps: Arc::new(RwLock::new(vec![HashSet::new(), HashSet::new()])),
cache: Rc::new(RefCell::new(HashMap::new())), cache: Arc::new(RwLock::new(HashMap::new())),
count_cache: Rc::new(RefCell::new(HashMap::new())), count_cache: Arc::new(RwLock::new(HashMap::new())),
}; };
result result
.count_cache .count_cache
.borrow_mut() .write()
.expect(W_FAIL)
.insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0)); .insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
result result
.count_cache .count_cache
.borrow_mut() .write()
.expect(W_FAIL)
.insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0)); .insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
result result
} }
} }
fn default_count_cache() -> Rc<RefCell<HashMap<Term, CountNode>>> { fn default_count_cache() -> Arc<RwLock<HashMap<Term, CountNode>>> {
Rc::new(RefCell::new(HashMap::new())) Arc::new(RwLock::new(HashMap::new()))
} }
/// Instantiates a [variable][crate::datatypes::Var] and returns the representing roBDD as a [`Term`][crate::datatypes::Term]. /// Instantiates a [variable][crate::datatypes::Var] and returns the representing roBDD as a [`Term`][crate::datatypes::Term].
@ -136,7 +144,7 @@ impl Bdd {
positive: &[Var], positive: &[Var],
) -> Vec<(Vec<Var>, Vec<Var>)> { ) -> Vec<(Vec<Var>, Vec<Var>)> {
let mut result = Vec::new(); let mut result = Vec::new();
let node = self.nodes.borrow()[tree.value()]; let node = self.nodes.read().expect(R_FAIL)[tree.value()];
let var = node.var(); let var = node.var();
if tree.is_truth_value() { if tree.is_truth_value() {
return Vec::new(); return Vec::new();
@ -178,10 +186,10 @@ impl Bdd {
/// Restrict the value of a given [variable][crate::datatypes::Var] to **val**. /// Restrict the value of a given [variable][crate::datatypes::Var] to **val**.
pub fn restrict(&self, tree: Term, var: Var, val: bool) -> Term { pub fn restrict(&self, tree: Term, var: Var, val: bool) -> Term {
let node = self.nodes.borrow()[tree.0]; let node = self.nodes.read().expect("")[tree.0];
#[cfg(feature = "variablelist")] #[cfg(feature = "variablelist")]
{ {
if !self.var_deps.borrow()[tree.value()].contains(&var) { if !self.var_deps.read().expect(R_FAIL)[tree.value()].contains(&var) {
return tree; return tree;
} }
} }
@ -214,10 +222,10 @@ impl Bdd {
i i
} else { } else {
let minvar = Var(min( let minvar = Var(min(
self.nodes.borrow()[i.value()].var().value(), self.nodes.read().expect(R_FAIL)[i.value()].var().value(),
min( min(
self.nodes.borrow()[t.value()].var().value(), self.nodes.read().expect(R_FAIL)[t.value()].var().value(),
self.nodes.borrow()[e.value()].var().value(), self.nodes.read().expect(R_FAIL)[e.value()].var().value(),
), ),
)); ));
let itop = self.restrict(i, minvar, true); let itop = self.restrict(i, minvar, true);
@ -240,25 +248,30 @@ impl Bdd {
lo lo
} else { } else {
let node = BddNode::new(var, lo, hi); let node = BddNode::new(var, lo, hi);
if let Some(t) = self.cache.borrow().get(&node) { if let Some(t) = self.cache.read().expect(R_FAIL).get(&node) {
return *t; return *t;
} }
let new_term = Term(self.nodes.borrow().len()); let mut nodes = self.nodes.write().expect(W_FAIL);
self.nodes.borrow_mut().push(node); // check if we have some dangling updates
self.cache.borrow_mut().insert(node, new_term); if let Some(t) = self.cache.read().expect(R_FAIL).get(&node) {
return *t;
}
let new_term = Term(nodes.len());
nodes.push(node);
self.cache.write().expect("W_FAIL").insert(node, new_term);
#[cfg(feature = "variablelist")] #[cfg(feature = "variablelist")]
{ {
let mut var_set: HashSet<Var> = self.var_deps.borrow()[lo.value()] let mut var_set: HashSet<Var> = self.var_deps.read().expect(R_FAIL)[lo.value()]
.union(&self.var_deps.borrow()[hi.value()]) .union(&self.var_deps.read().expect(R_FAIL)[hi.value()])
.copied() .copied()
.collect(); .collect();
var_set.insert(var); var_set.insert(var);
self.var_deps.borrow_mut().push(var_set); self.var_deps.write().expect("W_FAIL").push(var_set);
} }
log::debug!("newterm: {} as {:?}", new_term, node); log::debug!("newterm: {} as {:?}", new_term, node);
#[cfg(feature = "adhoccounting")] #[cfg(feature = "adhoccounting")]
{ {
let mut count_cache = self.count_cache.borrow_mut(); let mut count_cache = self.count_cache.write().expect(W_FAIL);
let (lo_counts, lo_paths, lodepth) = let (lo_counts, lo_paths, lodepth) =
*count_cache.get(&lo).expect("Cache corrupted"); *count_cache.get(&lo).expect("Cache corrupted");
let (hi_counts, hi_paths, hidepth) = let (hi_counts, hi_paths, hidepth) =
@ -312,7 +325,7 @@ impl Bdd {
pub fn models(&self, term: Term, _memoization: bool) -> ModelCounts { pub fn models(&self, term: Term, _memoization: bool) -> ModelCounts {
#[cfg(feature = "adhoccounting")] #[cfg(feature = "adhoccounting")]
{ {
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").0; return self.count_cache.read().expect(R_FAIL).get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").0;
} }
#[cfg(not(feature = "adhoccounting"))] #[cfg(not(feature = "adhoccounting"))]
if _memoization { if _memoization {
@ -328,7 +341,7 @@ impl Bdd {
pub fn paths(&self, term: Term, _memoization: bool) -> ModelCounts { pub fn paths(&self, term: Term, _memoization: bool) -> ModelCounts {
#[cfg(feature = "adhoccounting")] #[cfg(feature = "adhoccounting")]
{ {
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").1; return self.count_cache.read().expect(R_FAIL).get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").1;
} }
#[cfg(not(feature = "adhoccounting"))] #[cfg(not(feature = "adhoccounting"))]
if _memoization { if _memoization {
@ -345,7 +358,7 @@ impl Bdd {
pub fn max_depth(&self, term: Term) -> usize { pub fn max_depth(&self, term: Term) -> usize {
#[cfg(feature = "adhoccounting")] #[cfg(feature = "adhoccounting")]
{ {
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").2; return self.count_cache.read().expect(R_FAIL).get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").2;
} }
#[cfg(not(feature = "adhoccounting"))] #[cfg(not(feature = "adhoccounting"))]
match self.count_cache.borrow().get(&term) { match self.count_cache.borrow().get(&term) {
@ -369,7 +382,7 @@ impl Bdd {
} else if term == Term::BOT { } else if term == Term::BOT {
(ModelCounts::bot(), ModelCounts::bot(), 0) (ModelCounts::bot(), ModelCounts::bot(), 0)
} else { } else {
let node = &self.nodes.borrow()[term.0]; let node = &self.nodes.read().expect(R_FAIL)[term.0];
let mut lo_exp = 0u32; let mut lo_exp = 0u32;
let mut hi_exp = 0u32; let mut hi_exp = 0u32;
let (lo_counts, lo_paths, lodepth) = self.modelcount_naive(node.lo()); let (lo_counts, lo_paths, lodepth) = self.modelcount_naive(node.lo());
@ -401,11 +414,11 @@ impl Bdd {
} else if term == Term::BOT { } else if term == Term::BOT {
(ModelCounts::bot(), ModelCounts::bot(), 0) (ModelCounts::bot(), ModelCounts::bot(), 0)
} else { } else {
if let Some(result) = self.count_cache.borrow().get(&term) { if let Some(result) = self.count_cache.read().expect(R_FAIL).get(&term) {
return *result; return *result;
} }
let result = { let result = {
let node = &self.nodes.borrow()[term.0]; let node = &self.nodes.read().expect(R_FAIL)[term.0];
let mut lo_exp = 0u32; let mut lo_exp = 0u32;
let mut hi_exp = 0u32; let mut hi_exp = 0u32;
let (lo_counts, lo_paths, lodepth) = self.modelcount_memoization(node.lo()); let (lo_counts, lo_paths, lodepth) = self.modelcount_memoization(node.lo());
@ -431,7 +444,7 @@ impl Bdd {
std::cmp::max(lodepth, hidepth) + 1, std::cmp::max(lodepth, hidepth) + 1,
) )
}; };
self.count_cache.borrow_mut().insert(term, result); self.count_cache.write().expect(W_FAIL).insert(term, result);
result result
} }
} }
@ -442,12 +455,14 @@ impl Bdd {
#[cfg(feature = "adhoccounting")] #[cfg(feature = "adhoccounting")]
{ {
self.count_cache self.count_cache
.borrow_mut() .write()
.expect(W_FAIL)
.insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0)); .insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
self.count_cache self.count_cache
.borrow_mut() .write()
.expect(W_FAIL)
.insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0)); .insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
for i in 0..self.nodes.borrow().len() { for i in 0..self.nodes.read().expect(R_FAIL).len() {
log::debug!("fixing Term({})", i); log::debug!("fixing Term({})", i);
self.modelcount_memoization(Term(i)); self.modelcount_memoization(Term(i));
} }
@ -456,16 +471,17 @@ impl Bdd {
fn generate_var_dependencies(&self) { fn generate_var_dependencies(&self) {
#[cfg(feature = "variablelist")] #[cfg(feature = "variablelist")]
self.nodes.borrow().iter().for_each(|node| { self.nodes.read().expect(R_FAIL).iter().for_each(|node| {
if node.var() >= Var::BOT { if node.var() >= Var::BOT {
self.var_deps.borrow_mut().push(HashSet::new()); self.var_deps.write().expect(W_FAIL).push(HashSet::new());
} else { } else {
let mut var_set: HashSet<Var> = self.var_deps.borrow()[node.lo().value()] let mut var_set: HashSet<Var> = self.var_deps.read().expect(R_FAIL)
.union(&self.var_deps.borrow()[node.hi().value()]) [node.lo().value()]
.union(&self.var_deps.read().expect(R_FAIL)[node.hi().value()])
.copied() .copied()
.collect(); .collect();
var_set.insert(node.var()); var_set.insert(node.var());
self.var_deps.borrow_mut().push(var_set); self.var_deps.write().expect(W_FAIL).push(var_set);
} }
}); });
} }
@ -474,7 +490,7 @@ impl Bdd {
pub fn var_dependencies(&self, tree: Term) -> HashSet<Var> { pub fn var_dependencies(&self, tree: Term) -> HashSet<Var> {
#[cfg(feature = "variablelist")] #[cfg(feature = "variablelist")]
{ {
self.var_deps.borrow()[tree.value()].clone() self.var_deps.read().expect(R_FAIL)[tree.value()].clone()
} }
#[cfg(not(feature = "variablelist"))] #[cfg(not(feature = "variablelist"))]
{ {
@ -525,7 +541,7 @@ mod test {
#[test] #[test]
fn newbdd() { fn newbdd() {
let bdd = Bdd::new(); let bdd = Bdd::new();
assert_eq!(bdd.nodes.borrow().len(), 2); assert_eq!(bdd.nodes.read().expect(R_FAIL).len(), 2);
} }
#[test] #[test]
@ -534,7 +550,7 @@ mod test {
assert_eq!(Bdd::constant(true), Term::TOP); assert_eq!(Bdd::constant(true), Term::TOP);
assert_eq!(Bdd::constant(false), Term::BOT); assert_eq!(Bdd::constant(false), Term::BOT);
assert_eq!(bdd.nodes.borrow().len(), 2); assert_eq!(bdd.nodes.read().expect(R_FAIL).len(), 2);
} }
#[test] #[test]
@ -645,13 +661,14 @@ mod test {
assert_eq!(bdd.models(v1, false), (1, 1).into()); assert_eq!(bdd.models(v1, false), (1, 1).into());
let mut x = bdd let mut x = bdd
.count_cache .count_cache
.borrow_mut() .write()
.expect(W_FAIL)
.iter() .iter()
.map(|(x, y)| (*x, *y)) .map(|(x, y)| (*x, *y))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
x.sort(); x.sort();
log::debug!("{:?}", formula1); log::debug!("{:?}", formula1);
for x in bdd.nodes.borrow().iter().enumerate() { for x in bdd.nodes.read().expect(R_FAIL).iter().enumerate() {
log::debug!("{:?}", x); log::debug!("{:?}", x);
} }
log::debug!("{:?}", x); log::debug!("{:?}", x);
@ -762,13 +779,14 @@ mod test {
bdd.not(formula4); bdd.not(formula4);
let constructed = bdd.var_deps.clone(); let constructed = bdd.var_deps.clone();
bdd.var_deps = Rc::new(RefCell::new(Vec::new())); bdd.var_deps = Arc::new(RwLock::new(Vec::new()));
bdd.generate_var_dependencies(); bdd.generate_var_dependencies();
constructed constructed
.borrow() .read()
.expect(R_FAIL)
.iter() .iter()
.zip(bdd.var_deps.borrow().iter()) .zip(bdd.var_deps.read().expect(R_FAIL).iter())
.for_each(|(left, right)| { .for_each(|(left, right)| {
assert!(*left == *right); assert!(*left == *right);
}); });

View File

@ -10,7 +10,11 @@ use nom::{
sequence::{delimited, preceded, separated_pair, terminated}, sequence::{delimited, preceded, separated_pair, terminated},
IResult, IResult,
}; };
use std::{cell::RefCell, collections::HashMap, rc::Rc}; use std::{
cell::RefCell,
collections::HashMap,
sync::{Arc, RwLock},
};
/// A representation of a formula, still using the strings from the input. /// A representation of a formula, still using the strings from the input.
#[derive(Clone, PartialEq, Eq)] #[derive(Clone, PartialEq, Eq)]
@ -127,8 +131,8 @@ impl std::fmt::Debug for Formula<'_> {
/// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead. /// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead.
#[derive(Debug)] #[derive(Debug)]
pub struct AdfParser<'a> { pub struct AdfParser<'a> {
namelist: Rc<RefCell<Vec<String>>>, namelist: Arc<RwLock<Vec<String>>>,
dict: Rc<RefCell<HashMap<String, usize>>>, dict: Arc<RwLock<HashMap<String, usize>>>,
formulae: RefCell<Vec<Formula<'a>>>, formulae: RefCell<Vec<Formula<'a>>>,
formulaname: RefCell<Vec<String>>, formulaname: RefCell<Vec<String>>,
} }
@ -136,8 +140,8 @@ pub struct AdfParser<'a> {
impl Default for AdfParser<'_> { impl Default for AdfParser<'_> {
fn default() -> Self { fn default() -> Self {
AdfParser { AdfParser {
namelist: Rc::new(RefCell::new(Vec::new())), namelist: Arc::new(RwLock::new(Vec::new())),
dict: Rc::new(RefCell::new(HashMap::new())), dict: Arc::new(RwLock::new(HashMap::new())),
formulae: RefCell::new(Vec::new()), formulae: RefCell::new(Vec::new()),
formulaname: RefCell::new(Vec::new()), formulaname: RefCell::new(Vec::new()),
} }
@ -176,8 +180,8 @@ where
fn parse_statement(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> { fn parse_statement(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> {
|input| { |input| {
let mut dict = self.dict.borrow_mut(); let mut dict = self.dict.write().expect("Failed to gain write lock");
let mut namelist = self.namelist.borrow_mut(); let mut namelist = self.namelist.write().expect("Failed to gain write lock");
let (remain, statement) = let (remain, statement) =
terminated(AdfParser::statement, terminated(tag("."), multispace0))(input)?; terminated(AdfParser::statement, terminated(tag("."), multispace0))(input)?;
if !dict.contains_key(statement) { if !dict.contains_key(statement) {
@ -205,11 +209,16 @@ impl AdfParser<'_> {
fn regenerate_indizes(&self) { fn regenerate_indizes(&self) {
self.namelist self.namelist
.as_ref() .as_ref()
.borrow() .read()
.expect("Failed to gain read lock")
.iter() .iter()
.enumerate() .enumerate()
.for_each(|(i, elem)| { .for_each(|(i, elem)| {
self.dict.as_ref().borrow_mut().insert(elem.clone(), i); self.dict
.as_ref()
.write()
.expect("Failed to gain write lock")
.insert(elem.clone(), i);
}); });
} }
@ -217,7 +226,10 @@ impl AdfParser<'_> {
/// Results, which got used before might become corrupted. /// Results, which got used before might become corrupted.
/// Ensure that all used data is physically copied. /// Ensure that all used data is physically copied.
pub fn varsort_lexi(&self) -> &Self { pub fn varsort_lexi(&self) -> &Self {
self.namelist.as_ref().borrow_mut().sort_unstable(); self.namelist
.write()
.expect("Failed to gain write lock")
.sort_unstable();
self.regenerate_indizes(); self.regenerate_indizes();
self self
} }
@ -227,8 +239,8 @@ impl AdfParser<'_> {
/// Ensure that all used data is physically copied. /// Ensure that all used data is physically copied.
pub fn varsort_alphanum(&self) -> &Self { pub fn varsort_alphanum(&self) -> &Self {
self.namelist self.namelist
.as_ref() .write()
.borrow_mut() .expect("Failed to gain write lock")
.string_sort_unstable(natural_lexical_cmp); .string_sort_unstable(natural_lexical_cmp);
self.regenerate_indizes(); self.regenerate_indizes();
self self
@ -343,14 +355,23 @@ impl AdfParser<'_> {
/// Allows insight of the number of parsed statements. /// Allows insight of the number of parsed statements.
pub fn dict_size(&self) -> usize { pub fn dict_size(&self) -> usize {
//self.dict.borrow().len() //self.dict.borrow().len()
self.dict.as_ref().borrow().len() self.dict
.as_ref()
.read()
.expect("Failed to gain read lock")
.len()
} }
/// Returns the number-representation and position of a given statement in string-representation. /// Returns the number-representation and position of a given statement in string-representation.
/// ///
/// Will return [None] if the string does no occur in the dictionary. /// Will return [None] if the string does no occur in the dictionary.
pub fn dict_value(&self, value: &str) -> Option<usize> { pub fn dict_value(&self, value: &str) -> Option<usize> {
self.dict.as_ref().borrow().get(value).copied() self.dict
.as_ref()
.read()
.expect("Failed to gain read lock")
.get(value)
.copied()
} }
/// Returns the acceptance condition of a statement at the given position. /// Returns the acceptance condition of a statement at the given position.
@ -360,12 +381,12 @@ impl AdfParser<'_> {
self.formulae.borrow().get(idx).cloned() self.formulae.borrow().get(idx).cloned()
} }
pub(crate) fn dict_rc_refcell(&self) -> Rc<RefCell<HashMap<String, usize>>> { pub(crate) fn dict_rc_refcell(&self) -> Arc<RwLock<HashMap<String, usize>>> {
Rc::clone(&self.dict) Arc::clone(&self.dict)
} }
pub(crate) fn namelist_rc_refcell(&self) -> Rc<RefCell<Vec<String>>> { pub(crate) fn namelist_rc_refcell(&self) -> Arc<RwLock<Vec<String>>> {
Rc::clone(&self.namelist) Arc::clone(&self.namelist)
} }
pub(crate) fn formula_count(&self) -> usize { pub(crate) fn formula_count(&self) -> usize {
@ -379,8 +400,8 @@ impl AdfParser<'_> {
.map(|name| { .map(|name| {
*self *self
.dict .dict
.as_ref() .read()
.borrow() .expect("Failed to gain read lock")
.get(name) .get(name)
.expect("Dictionary should contain all the used formulanames") .expect("Dictionary should contain all the used formulanames")
}) })