Mirror of https://github.com/ellmau/adf-obdd.git, synced 2025-12-19 09:29:36 +01:00
Added some parallelism ideas; none of them are working yet
This commit is contained in:
parent be68af7d05, commit 31e02c6de4
Cargo.lock: 85 changed lines (generated)
@@ -14,6 +14,7 @@ dependencies = [
"nom",
"quickcheck",
"quickcheck_macros",
"rayon",
"serde",
"serde_json",
"test-log",
@@ -172,6 +173,41 @@ dependencies = [
"os_str_bytes",
]

[[package]]
name = "crossbeam-channel"
version = "0.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53"
dependencies = [
"cfg-if",
"crossbeam-utils",
]

[[package]]
name = "crossbeam-deque"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]

[[package]]
name = "crossbeam-epoch"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"
dependencies = [
"autocfg",
"cfg-if",
"crossbeam-utils",
"lazy_static",
"memoffset",
"scopeguard",
]

[[package]]
name = "crossbeam-utils"
version = "0.8.7"
@@ -428,6 +464,15 @@ version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"

[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]

[[package]]
name = "minimal-lexical"
version = "0.2.1"
@@ -459,6 +504,16 @@ dependencies = [
"autocfg",
]

[[package]]
name = "num_cpus"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
dependencies = [
"hermit-abi",
"libc",
]

[[package]]
name = "once_cell"
version = "1.9.0"
@@ -630,6 +685,30 @@ dependencies = [
"rand_core 0.5.1",
]

[[package]]
name = "rayon"
version = "1.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d"
dependencies = [
"autocfg",
"crossbeam-deque",
"either",
"rayon-core",
]

[[package]]
name = "rayon-core"
version = "1.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils",
"num_cpus",
]

[[package]]
name = "redox_syscall"
version = "0.2.11"
@@ -686,6 +765,12 @@ dependencies = [
"winapi-util",
]

[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "serde"
version = "1.0.137"
@@ -216,7 +216,7 @@ impl App {
}

if self.stable {
for model in naive_adf.stable() {
for model in naive_adf.stable_par() {
print!("{}", printer.print_interpretation(&model));
}
}

@@ -30,6 +30,7 @@ serde = { version = "1.0", features = ["derive","rc"] }
serde_json = "1.0"
biodivine-lib-bdd = "0.3.0"
derivative = "2.2.0"
rayon = "1.5.3"

[dev-dependencies]
test-log = "0.2"
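The new rayon dependency supplies the global work-stealing thread pool behind the parallel iterators introduced in this commit. As a minimal, self-contained sketch (not part of the commit; the thread count of four is an arbitrary assumption), the pool can be bounded once before any parallel iterator runs:

use rayon::prelude::*;

fn main() {
    // Bound the global pool used by all of rayon's parallel iterators.
    // This must happen before the first parallel iterator executes.
    rayon::ThreadPoolBuilder::new()
        .num_threads(4)
        .build_global()
        .expect("the global thread pool can only be initialised once");

    // Any later parallel iterator (such as those added in lib/src/adf.rs)
    // now runs on at most four worker threads.
    let sum: u64 = (0u64..1_000).into_par_iter().sum();
    println!("{sum}");
}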
lib/src/adf.rs: 188 changed lines
@@ -5,6 +5,9 @@ This module describes the abstract dialectical framework.
- computing fixpoints
*/

use std::{marker::Sync, sync::Mutex};

use rayon::prelude::*;
use serde::{Deserialize, Serialize};

use crate::{
@@ -49,9 +52,20 @@ impl Adf {
parser.dict_rc_refcell(),
),
bdd: Bdd::new(),
ac: vec![Term(0); parser.namelist_rc_refcell().as_ref().borrow().len()],
ac: vec![
Term(0);
parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.len()
],
};
(0..parser.namelist_rc_refcell().borrow().len())
(0..parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.len())
.into_iter()
.for_each(|value| {
log::trace!("adding variable {}", Var(value));
@@ -188,7 +202,7 @@ impl Adf {
result
}

fn grounded_internal(&mut self, interpretation: &[Term]) -> Vec<Term> {
fn grounded_internal(&self, interpretation: &[Term]) -> Vec<Term> {
let mut t_vals: usize = interpretation
.iter()
.filter(|elem| elem.is_truth_value())
@@ -266,6 +280,43 @@ impl Adf {
.map(|(int, _grd)| int)
}

/// Testing some parallel stable computation
pub fn stable_par(&mut self) -> Vec<Vec<Term>> {
let grounded = self.grounded();
TwoValuedInterpretationsIterator::new(&grounded)
.par_bridge()
.into_par_iter()
.map(|interpretation| {
let mut interpr = self.ac.clone();
for ac in interpr.iter_mut() {
*ac = interpretation
.iter()
.enumerate()
.fold(*ac, |acc, (var, term)| {
if term.is_truth_value() && !term.is_true() {
self.bdd.restrict(acc, Var(var), false)
} else {
acc
}
});
}
let grounded_check = self.grounded_internal(&interpr);
log::debug!(
"grounded candidate\n{:?}\n{:?}",
interpretation,
grounded_check
);
(interpretation, grounded_check)
})
.filter(|(int, grd)| {
int.iter()
.zip(grd.iter())
.all(|(it, gr)| it.compare_inf(gr))
})
.map(|(int, _grd)| int)
.collect::<Vec<_>>()
}

/// Computes the stable models.
/// Returns a vector with all stable models, using a single-formula representation in biodivine to enumerate the possible models.
/// Note that the biodivine adf needs to be the one which instantiated the adf (if applicable).
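For reference, a standalone sketch of the par_bridge pattern that stable_par relies on: a sequential candidate generator is bridged into rayon's parallel iterator API, each candidate is checked in parallel, and the survivors are collected. The even-count check merely stands in for the expensive grounded-fixpoint test and is an assumption, not code from this repository. Note also that the .into_par_iter() call after .par_bridge() above appears redundant, since par_bridge already yields a parallel iterator.

use rayon::prelude::*;

fn filter_candidates(candidates: impl Iterator<Item = Vec<bool>> + Send) -> Vec<Vec<bool>> {
    candidates
        .par_bridge() // turn the sequential iterator into a parallel one
        .filter(|candidate| {
            // Placeholder for the per-candidate check done in stable_par.
            candidate.iter().filter(|&&b| b).count() % 2 == 0
        })
        .collect()
}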
@@ -407,7 +458,7 @@ impl Adf {

fn two_val_model_counts<H>(&mut self, interpr: &[Term], heuristic: H) -> Vec<Vec<Term>>
where
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy,
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy + Sync,
{
self.two_val_model_counts_logic(interpr, &vec![Term::UND; interpr.len()], 0, heuristic)
}
@@ -461,18 +512,18 @@ impl Adf {
}

fn two_val_model_counts_logic<H>(
&mut self,
&self,
interpr: &[Term],
will_be: &[Term],
depth: usize,
heuristic: H,
) -> Vec<Vec<Term>>
where
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy,
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy + Sync,
{
log::debug!("two_val_model_recursion_depth: {}/{}", depth, interpr.len());
if let Some((idx, ac)) = interpr
.iter()
.par_iter()
.enumerate()
.filter(|(idx, val)| !(val.is_truth_value() || will_be[*idx].is_truth_value()))
.min_by(|(idx_a, val_a), (idx_b, val_b)| {
@@ -484,7 +535,8 @@ impl Adf {
)
})
{
let mut result = Vec::new();
// let mut result = Vec::new();
let result: Mutex<Vec<Vec<Term>>> = Mutex::new(Vec::new());
let check_models = !self.bdd.paths(*ac, true).more_models();
log::trace!(
"Identified Var({}) with ac {:?} to be {}",
@@ -521,17 +573,27 @@ impl Adf {
new_int[idx] = if check_models { Term::TOP } else { Term::BOT };
let upd_int = self.update_interpretation_fixpoint(&new_int);
if self.check_consistency(&upd_int, will_be) {
result.append(&mut self.two_val_model_counts_logic(
let mut append = self.two_val_model_counts_logic(
&upd_int,
will_be,
depth + 1,
heuristic,
));
);
result
.lock()
.expect("Failed to lock mutex for results")
.append(&mut append);
}
}
res
});
log::trace!("results found so far:{}", result.len());
log::trace!(
"results found so far:{}",
result
.lock()
.expect("Failed to lock mutex for results")
.len()
);
// checked one alternative, we can now conclude that only the other option may work
log::debug!("checked one alternative, concluding the other value");
let new_int = interpr
@@ -546,15 +608,23 @@ impl Adf {
if new_int[idx].no_inf_inconsistency(&upd_int[idx]) {
let mut must_be_new = will_be.to_vec();
must_be_new[idx] = new_int[idx];
result.append(&mut self.two_val_model_counts_logic(
let mut append = self.two_val_model_counts_logic(
&upd_int,
&must_be_new,
depth + 1,
heuristic,
));
}
}
);
result
.lock()
.expect("Failed to lock mutex for results")
.append(&mut append);
}
}
let x = result
.lock()
.expect("Failed to lock mutex for results")
.clone();
x
} else {
// filter has created empty iterator
let concluded = interpr
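Below is a standalone sketch of the accumulation pattern introduced here: parallel workers append their partial results to a shared, Mutex-protected vector. The input type and the per-task work are placeholders, not taken from the repository. A lock-free alternative would be rayon's flat_map(...).collect() or a map(...).reduce(...) combination, which avoids contention on a single Mutex.

use rayon::prelude::*;
use std::sync::Mutex;

fn collect_parallel(partial_results: Vec<Vec<u32>>) -> Vec<u32> {
    let collected: Mutex<Vec<u32>> = Mutex::new(Vec::new());
    partial_results.into_par_iter().for_each(|mut partial| {
        // Each task locks the shared vector only long enough to append.
        collected
            .lock()
            .expect("Failed to lock mutex for results")
            .append(&mut partial);
    });
    // All tasks are done; take the vector back out of the Mutex.
    collected.into_inner().expect("mutex should not be poisoned")
}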
@@ -578,7 +648,7 @@ impl Adf {
}
}

fn update_interpretation_fixpoint(&mut self, interpretation: &[Term]) -> Vec<Term> {
fn update_interpretation_fixpoint(&self, interpretation: &[Term]) -> Vec<Term> {
let mut cur_int = interpretation.to_vec();
loop {
let new_int = self.update_interpretation(interpretation);
@@ -590,11 +660,11 @@ impl Adf {
}
}

fn update_interpretation(&mut self, interpretation: &[Term]) -> Vec<Term> {
fn update_interpretation(&self, interpretation: &[Term]) -> Vec<Term> {
self.apply_interpretation(interpretation, interpretation)
}

fn apply_interpretation(&mut self, ac: &[Term], interpretation: &[Term]) -> Vec<Term> {
fn apply_interpretation(&self, ac: &[Term], interpretation: &[Term]) -> Vec<Term> {
ac.iter()
.map(|ac| {
interpretation
@@ -611,7 +681,7 @@ impl Adf {
.collect::<Vec<Term>>()
}

fn check_consistency(&mut self, interpretation: &[Term], will_be: &[Term]) -> bool {
fn check_consistency(&self, interpretation: &[Term], will_be: &[Term]) -> bool {
interpretation
.iter()
.zip(will_be.iter())
@@ -712,11 +782,41 @@ mod test {
parser.parse()(input).unwrap();

let adf = Adf::from_parser(&parser);
assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a");
assert_eq!(adf.ordering.names().as_ref().borrow()[1], "c");
assert_eq!(adf.ordering.names().as_ref().borrow()[2], "b");
assert_eq!(adf.ordering.names().as_ref().borrow()[3], "e");
assert_eq!(adf.ordering.names().as_ref().borrow()[4], "d");
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[0],
"a"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[1],
"c"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[2],
"b"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[3],
"e"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[4],
"d"
);

assert_eq!(adf.ac, vec![Term(4), Term(2), Term(7), Term(15), Term(12)]);

@@ -727,11 +827,41 @@ mod test {
parser.varsort_alphanum();

let adf = Adf::from_parser(&parser);
assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a");
assert_eq!(adf.ordering.names().as_ref().borrow()[1], "b");
assert_eq!(adf.ordering.names().as_ref().borrow()[2], "c");
assert_eq!(adf.ordering.names().as_ref().borrow()[3], "d");
assert_eq!(adf.ordering.names().as_ref().borrow()[4], "e");
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[0],
"a"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[1],
"b"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[2],
"c"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[3],
"d"
);
assert_eq!(
adf.ordering
.names()
.read()
.expect("Failed to gain read lock")[4],
"e"
);

assert_eq!(adf.ac, vec![Term(3), Term(7), Term(2), Term(11), Term(13)]);
}

@@ -40,7 +40,11 @@ impl Adf {
pub fn from_parser(parser: &AdfParser) -> Self {
log::info!("[Start] instantiating BDD");
let mut bdd_var_builder = biodivine_lib_bdd::BddVariableSetBuilder::new();
let namelist = parser.namelist_rc_refcell().as_ref().borrow().clone();
let namelist = parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.clone();
let slice_vec: Vec<&str> = namelist.iter().map(<_>::as_ref).collect();
bdd_var_builder.make_variables(&slice_vec);
let bdd_variables = bdd_var_builder.build();
@@ -51,7 +55,11 @@ impl Adf {
),
ac: vec![
bdd_variables.mk_false();
parser.namelist_rc_refcell().as_ref().borrow().len()
parser
.namelist_rc_refcell()
.read()
.expect("Failed to gain read lock")
.len()
],
vars: bdd_variables.variables(),
varset: bdd_variables,

@@ -2,27 +2,33 @@

use super::{Term, Var};
use serde::{Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc};
use std::{
cell::RefCell,
collections::HashMap,
fmt::Display,
rc::Rc,
sync::{Arc, RwLock},
};

#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct VarContainer {
names: Rc<RefCell<Vec<String>>>,
mapping: Rc<RefCell<HashMap<String, usize>>>,
names: Arc<RwLock<Vec<String>>>,
mapping: Arc<RwLock<HashMap<String, usize>>>,
}

impl Default for VarContainer {
fn default() -> Self {
VarContainer {
names: Rc::new(RefCell::new(Vec::new())),
mapping: Rc::new(RefCell::new(HashMap::new())),
names: Arc::new(RwLock::new(Vec::new())),
mapping: Arc::new(RwLock::new(HashMap::new())),
}
}
}

impl VarContainer {
pub fn from_parser(
names: Rc<RefCell<Vec<String>>>,
mapping: Rc<RefCell<HashMap<String, usize>>>,
names: Arc<RwLock<Vec<String>>>,
mapping: Arc<RwLock<HashMap<String, usize>>>,
) -> VarContainer {
VarContainer { names, mapping }
}
@@ -35,16 +41,24 @@ impl VarContainer {
}

pub fn variable(&self, name: &str) -> Option<Var> {
self.mapping.borrow().get(name).map(|val| Var(*val))
self.mapping
.read()
.expect("Failed to gain read lock")
.get(name)
.map(|val| Var(*val))
}

pub fn name(&self, var: Var) -> Option<String> {
self.names.borrow().get(var.value()).cloned()
self.names
.read()
.expect("Failed to gain read lock")
.get(var.value())
.cloned()
}

#[allow(dead_code)]
pub fn names(&self) -> Rc<RefCell<Vec<String>>> {
Rc::clone(&self.names)
pub fn names(&self) -> Arc<RwLock<Vec<String>>> {
Arc::clone(&self.names)
}
}
/// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations.
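The switch from Rc<RefCell<...>> to Arc<RwLock<...>> above is what makes the shared name table usable from multiple threads: Arc provides thread-safe reference counting and RwLock allows many concurrent readers. A standalone sketch of that pattern (the data and thread count are illustrative assumptions):

use std::sync::{Arc, RwLock};
use std::thread;

fn main() {
    let names: Arc<RwLock<Vec<String>>> =
        Arc::new(RwLock::new(vec!["a".to_string(), "b".to_string()]));

    // Each thread gets its own clone of the Arc and takes a shared read lock.
    let handles: Vec<_> = (0..2)
        .map(|_| {
            let names = Arc::clone(&names);
            thread::spawn(move || names.read().expect("Failed to gain read lock").len())
        })
        .collect();

    for handle in handles {
        assert_eq!(handle.join().expect("thread panicked"), 2);
    }
}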
lib/src/obdd.rs: 118 changed lines
@@ -3,30 +3,36 @@
pub mod vectorize;
use crate::datatypes::*;
use serde::{Deserialize, Serialize};
use std::borrow::Borrow;
use std::collections::HashSet;
use std::rc::Rc;
use std::sync::{Arc, RwLock};
use std::{cell::RefCell, cmp::min, collections::HashMap, fmt::Display};

const R_FAIL: &str = "Failed to gain read lock";
const W_FAIL: &str = "Failed to gain write lock";

/// Contains the data of (possibly) multiple roBDDs, managed over one collection of nodes.
/// It has a couple of methods to instantiate, update, and query properties on a given roBDD.
/// Each roBDD is identified by its corresponding [`Term`], which implicitly identifies the root node of a roBDD.
#[derive(Debug, Serialize, Deserialize)]
pub struct Bdd {
nodes: Rc<RefCell<Vec<BddNode>>>,
nodes: Arc<RwLock<Vec<BddNode>>>,
#[cfg(feature = "variablelist")]
#[serde(skip)]
var_deps: Rc<RefCell<Vec<HashSet<Var>>>>,
var_deps: Arc<RwLock<Vec<HashSet<Var>>>>,
//#[serde(with = "vectorize")]
#[serde(skip)]
cache: Rc<RefCell<HashMap<BddNode, Term>>>,
cache: Arc<RwLock<HashMap<BddNode, Term>>>,
#[serde(skip, default = "Bdd::default_count_cache")]
count_cache: Rc<RefCell<HashMap<Term, CountNode>>>,
count_cache: Arc<RwLock<HashMap<Term, CountNode>>>,
}

impl Display for Bdd {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, " ")?;
for (idx, elem) in self.nodes.borrow().iter().enumerate() {
// for (idx, elem) in self.nodes.borrow().iter().enumerate() {
for (idx, elem) in self.nodes.read().expect(R_FAIL).iter().enumerate() {
writeln!(f, "{} {}", idx, *elem)?;
}
Ok(())
@@ -56,26 +62,28 @@ impl Bdd {
#[cfg(feature = "adhoccounting")]
{
let result = Self {
nodes: Rc::new(RefCell::new(vec![BddNode::bot_node(), BddNode::top_node()])),
nodes: Arc::new(RwLock::new(vec![BddNode::bot_node(), BddNode::top_node()])),
#[cfg(feature = "variablelist")]
var_deps: Rc::new(RefCell::new(vec![HashSet::new(), HashSet::new()])),
cache: Rc::new(RefCell::new(HashMap::new())),
count_cache: Rc::new(RefCell::new(HashMap::new())),
var_deps: Arc::new(RwLock::new(vec![HashSet::new(), HashSet::new()])),
cache: Arc::new(RwLock::new(HashMap::new())),
count_cache: Arc::new(RwLock::new(HashMap::new())),
};
result
.count_cache
.borrow_mut()
.write()
.expect(W_FAIL)
.insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
result
.count_cache
.borrow_mut()
.write()
.expect(W_FAIL)
.insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
result
}
}

fn default_count_cache() -> Rc<RefCell<HashMap<Term, CountNode>>> {
Rc::new(RefCell::new(HashMap::new()))
fn default_count_cache() -> Arc<RwLock<HashMap<Term, CountNode>>> {
Arc::new(RwLock::new(HashMap::new()))
}

/// Instantiates a [variable][crate::datatypes::Var] and returns the representing roBDD as a [`Term`][crate::datatypes::Term].
@@ -136,7 +144,7 @@ impl Bdd {
positive: &[Var],
) -> Vec<(Vec<Var>, Vec<Var>)> {
let mut result = Vec::new();
let node = self.nodes.borrow()[tree.value()];
let node = self.nodes.read().expect(R_FAIL)[tree.value()];
let var = node.var();
if tree.is_truth_value() {
return Vec::new();
@@ -178,10 +186,10 @@ impl Bdd {

/// Restrict the value of a given [variable][crate::datatypes::Var] to **val**.
pub fn restrict(&self, tree: Term, var: Var, val: bool) -> Term {
let node = self.nodes.borrow()[tree.0];
let node = self.nodes.read().expect("")[tree.0];
#[cfg(feature = "variablelist")]
{
if !self.var_deps.borrow()[tree.value()].contains(&var) {
if !self.var_deps.read().expect(R_FAIL)[tree.value()].contains(&var) {
return tree;
}
}
@@ -214,10 +222,10 @@ impl Bdd {
i
} else {
let minvar = Var(min(
self.nodes.borrow()[i.value()].var().value(),
self.nodes.read().expect(R_FAIL)[i.value()].var().value(),
min(
self.nodes.borrow()[t.value()].var().value(),
self.nodes.borrow()[e.value()].var().value(),
self.nodes.read().expect(R_FAIL)[t.value()].var().value(),
self.nodes.read().expect(R_FAIL)[e.value()].var().value(),
),
));
let itop = self.restrict(i, minvar, true);
@@ -240,25 +248,30 @@ impl Bdd {
lo
} else {
let node = BddNode::new(var, lo, hi);
if let Some(t) = self.cache.borrow().get(&node) {
if let Some(t) = self.cache.read().expect(R_FAIL).get(&node) {
return *t;
}
let new_term = Term(self.nodes.borrow().len());
self.nodes.borrow_mut().push(node);
self.cache.borrow_mut().insert(node, new_term);
let mut nodes = self.nodes.write().expect(W_FAIL);
// check if we have some dangling updates
if let Some(t) = self.cache.read().expect(R_FAIL).get(&node) {
return *t;
}
let new_term = Term(nodes.len());
nodes.push(node);
self.cache.write().expect("W_FAIL").insert(node, new_term);
#[cfg(feature = "variablelist")]
{
let mut var_set: HashSet<Var> = self.var_deps.borrow()[lo.value()]
.union(&self.var_deps.borrow()[hi.value()])
let mut var_set: HashSet<Var> = self.var_deps.read().expect(R_FAIL)[lo.value()]
.union(&self.var_deps.read().expect(R_FAIL)[hi.value()])
.copied()
.collect();
var_set.insert(var);
self.var_deps.borrow_mut().push(var_set);
self.var_deps.write().expect("W_FAIL").push(var_set);
}
log::debug!("newterm: {} as {:?}", new_term, node);
#[cfg(feature = "adhoccounting")]
{
let mut count_cache = self.count_cache.borrow_mut();
let mut count_cache = self.count_cache.write().expect(W_FAIL);
let (lo_counts, lo_paths, lodepth) =
*count_cache.get(&lo).expect("Cache corrupted");
let (hi_counts, hi_paths, hidepth) =
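The "dangling updates" re-check above is a double-checked lookup: the cache is consulted again after the write lock on the node table is taken, so two racing threads do not both insert the same node. A simplified standalone sketch of that idea, using a single RwLock-protected map (names and types are illustrative, not from the repository):

use std::collections::HashMap;
use std::sync::RwLock;

fn get_or_insert(cache: &RwLock<HashMap<String, usize>>, key: &str, next_id: usize) -> usize {
    // Fast path: a shared read lock is enough if the key is already cached.
    if let Some(id) = cache.read().expect("Failed to gain read lock").get(key) {
        return *id;
    }
    let mut map = cache.write().expect("Failed to gain write lock");
    // Re-check under the write lock: another thread may have inserted the key
    // between dropping the read lock and acquiring the write lock.
    if let Some(id) = map.get(key) {
        return *id;
    }
    map.insert(key.to_string(), next_id);
    next_id
}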
@@ -312,7 +325,7 @@ impl Bdd {
pub fn models(&self, term: Term, _memoization: bool) -> ModelCounts {
#[cfg(feature = "adhoccounting")]
{
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").0;
return self.count_cache.read().expect(R_FAIL).get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").0;
}
#[cfg(not(feature = "adhoccounting"))]
if _memoization {
@@ -328,7 +341,7 @@ impl Bdd {
pub fn paths(&self, term: Term, _memoization: bool) -> ModelCounts {
#[cfg(feature = "adhoccounting")]
{
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").1;
return self.count_cache.read().expect(R_FAIL).get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").1;
}
#[cfg(not(feature = "adhoccounting"))]
if _memoization {
@@ -345,7 +358,7 @@ impl Bdd {
pub fn max_depth(&self, term: Term) -> usize {
#[cfg(feature = "adhoccounting")]
{
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").2;
return self.count_cache.read().expect(R_FAIL).get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").2;
}
#[cfg(not(feature = "adhoccounting"))]
match self.count_cache.borrow().get(&term) {
@@ -369,7 +382,7 @@ impl Bdd {
} else if term == Term::BOT {
(ModelCounts::bot(), ModelCounts::bot(), 0)
} else {
let node = &self.nodes.borrow()[term.0];
let node = &self.nodes.read().expect(R_FAIL)[term.0];
let mut lo_exp = 0u32;
let mut hi_exp = 0u32;
let (lo_counts, lo_paths, lodepth) = self.modelcount_naive(node.lo());
@@ -401,11 +414,11 @@ impl Bdd {
} else if term == Term::BOT {
(ModelCounts::bot(), ModelCounts::bot(), 0)
} else {
if let Some(result) = self.count_cache.borrow().get(&term) {
if let Some(result) = self.count_cache.read().expect(R_FAIL).get(&term) {
return *result;
}
let result = {
let node = &self.nodes.borrow()[term.0];
let node = &self.nodes.read().expect(R_FAIL)[term.0];
let mut lo_exp = 0u32;
let mut hi_exp = 0u32;
let (lo_counts, lo_paths, lodepth) = self.modelcount_memoization(node.lo());
@@ -431,7 +444,7 @@ impl Bdd {
std::cmp::max(lodepth, hidepth) + 1,
)
};
self.count_cache.borrow_mut().insert(term, result);
self.count_cache.write().expect(W_FAIL).insert(term, result);
result
}
}
@@ -442,12 +455,14 @@ impl Bdd {
#[cfg(feature = "adhoccounting")]
{
self.count_cache
.borrow_mut()
.write()
.expect(W_FAIL)
.insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
self.count_cache
.borrow_mut()
.write()
.expect(W_FAIL)
.insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
for i in 0..self.nodes.borrow().len() {
for i in 0..self.nodes.read().expect(R_FAIL).len() {
log::debug!("fixing Term({})", i);
self.modelcount_memoization(Term(i));
}
@@ -456,16 +471,17 @@ impl Bdd {

fn generate_var_dependencies(&self) {
#[cfg(feature = "variablelist")]
self.nodes.borrow().iter().for_each(|node| {
self.nodes.read().expect(R_FAIL).iter().for_each(|node| {
if node.var() >= Var::BOT {
self.var_deps.borrow_mut().push(HashSet::new());
self.var_deps.write().expect(W_FAIL).push(HashSet::new());
} else {
let mut var_set: HashSet<Var> = self.var_deps.borrow()[node.lo().value()]
.union(&self.var_deps.borrow()[node.hi().value()])
let mut var_set: HashSet<Var> = self.var_deps.read().expect(R_FAIL)
[node.lo().value()]
.union(&self.var_deps.read().expect(R_FAIL)[node.hi().value()])
.copied()
.collect();
var_set.insert(node.var());
self.var_deps.borrow_mut().push(var_set);
self.var_deps.write().expect(W_FAIL).push(var_set);
}
});
}
@@ -474,7 +490,7 @@ impl Bdd {
pub fn var_dependencies(&self, tree: Term) -> HashSet<Var> {
#[cfg(feature = "variablelist")]
{
self.var_deps.borrow()[tree.value()].clone()
self.var_deps.read().expect(R_FAIL)[tree.value()].clone()
}
#[cfg(not(feature = "variablelist"))]
{
@@ -525,7 +541,7 @@ mod test {
#[test]
fn newbdd() {
let bdd = Bdd::new();
assert_eq!(bdd.nodes.borrow().len(), 2);
assert_eq!(bdd.nodes.read().expect(R_FAIL).len(), 2);
}

#[test]
@@ -534,7 +550,7 @@ mod test {

assert_eq!(Bdd::constant(true), Term::TOP);
assert_eq!(Bdd::constant(false), Term::BOT);
assert_eq!(bdd.nodes.borrow().len(), 2);
assert_eq!(bdd.nodes.read().expect(R_FAIL).len(), 2);
}

#[test]
@@ -645,13 +661,14 @@ mod test {
assert_eq!(bdd.models(v1, false), (1, 1).into());
let mut x = bdd
.count_cache
.borrow_mut()
.write()
.expect(W_FAIL)
.iter()
.map(|(x, y)| (*x, *y))
.collect::<Vec<_>>();
x.sort();
log::debug!("{:?}", formula1);
for x in bdd.nodes.borrow().iter().enumerate() {
for x in bdd.nodes.read().expect(R_FAIL).iter().enumerate() {
log::debug!("{:?}", x);
}
log::debug!("{:?}", x);
@@ -762,13 +779,14 @@ mod test {
bdd.not(formula4);

let constructed = bdd.var_deps.clone();
bdd.var_deps = Rc::new(RefCell::new(Vec::new()));
bdd.var_deps = Arc::new(RwLock::new(Vec::new()));
bdd.generate_var_dependencies();

constructed
.borrow()
.read()
.expect(R_FAIL)
.iter()
.zip(bdd.var_deps.borrow().iter())
.zip(bdd.var_deps.read().expect(R_FAIL).iter())
.for_each(|(left, right)| {
assert!(*left == *right);
});

@@ -10,7 +10,11 @@ use nom::{
sequence::{delimited, preceded, separated_pair, terminated},
IResult,
};
use std::{cell::RefCell, collections::HashMap, rc::Rc};
use std::{
cell::RefCell,
collections::HashMap,
sync::{Arc, RwLock},
};

/// A representation of a formula, still using the strings from the input.
#[derive(Clone, PartialEq, Eq)]
@@ -127,8 +131,8 @@ impl std::fmt::Debug for Formula<'_> {
/// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead.
#[derive(Debug)]
pub struct AdfParser<'a> {
namelist: Rc<RefCell<Vec<String>>>,
dict: Rc<RefCell<HashMap<String, usize>>>,
namelist: Arc<RwLock<Vec<String>>>,
dict: Arc<RwLock<HashMap<String, usize>>>,
formulae: RefCell<Vec<Formula<'a>>>,
formulaname: RefCell<Vec<String>>,
}
@@ -136,8 +140,8 @@ pub struct AdfParser<'a> {
impl Default for AdfParser<'_> {
fn default() -> Self {
AdfParser {
namelist: Rc::new(RefCell::new(Vec::new())),
dict: Rc::new(RefCell::new(HashMap::new())),
namelist: Arc::new(RwLock::new(Vec::new())),
dict: Arc::new(RwLock::new(HashMap::new())),
formulae: RefCell::new(Vec::new()),
formulaname: RefCell::new(Vec::new()),
}
@@ -176,8 +180,8 @@ where

fn parse_statement(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> {
|input| {
let mut dict = self.dict.borrow_mut();
let mut namelist = self.namelist.borrow_mut();
let mut dict = self.dict.write().expect("Failed to gain write lock");
let mut namelist = self.namelist.write().expect("Failed to gain write lock");
let (remain, statement) =
terminated(AdfParser::statement, terminated(tag("."), multispace0))(input)?;
if !dict.contains_key(statement) {
@@ -205,11 +209,16 @@ impl AdfParser<'_> {
fn regenerate_indizes(&self) {
self.namelist
.as_ref()
.borrow()
.read()
.expect("Failed to gain read lock")
.iter()
.enumerate()
.for_each(|(i, elem)| {
self.dict.as_ref().borrow_mut().insert(elem.clone(), i);
self.dict
.as_ref()
.write()
.expect("Failed to gain write lock")
.insert(elem.clone(), i);
});
}

@@ -217,7 +226,10 @@ impl AdfParser<'_> {
/// Results, which got used before might become corrupted.
/// Ensure that all used data is physically copied.
pub fn varsort_lexi(&self) -> &Self {
self.namelist.as_ref().borrow_mut().sort_unstable();
self.namelist
.write()
.expect("Failed to gain write lock")
.sort_unstable();
self.regenerate_indizes();
self
}
@@ -227,8 +239,8 @@ impl AdfParser<'_> {
/// Ensure that all used data is physically copied.
pub fn varsort_alphanum(&self) -> &Self {
self.namelist
.as_ref()
.borrow_mut()
.write()
.expect("Failed to gain write lock")
.string_sort_unstable(natural_lexical_cmp);
self.regenerate_indizes();
self
@@ -343,14 +355,23 @@ impl AdfParser<'_> {
/// Allows insight of the number of parsed statements.
pub fn dict_size(&self) -> usize {
//self.dict.borrow().len()
self.dict.as_ref().borrow().len()
self.dict
.as_ref()
.read()
.expect("Failed to gain read lock")
.len()
}

/// Returns the number-representation and position of a given statement in string-representation.
///
/// Will return [None] if the string does no occur in the dictionary.
pub fn dict_value(&self, value: &str) -> Option<usize> {
self.dict.as_ref().borrow().get(value).copied()
self.dict
.as_ref()
.read()
.expect("Failed to gain read lock")
.get(value)
.copied()
}

/// Returns the acceptance condition of a statement at the given position.
@@ -360,12 +381,12 @@ impl AdfParser<'_> {
self.formulae.borrow().get(idx).cloned()
}

pub(crate) fn dict_rc_refcell(&self) -> Rc<RefCell<HashMap<String, usize>>> {
Rc::clone(&self.dict)
pub(crate) fn dict_rc_refcell(&self) -> Arc<RwLock<HashMap<String, usize>>> {
Arc::clone(&self.dict)
}

pub(crate) fn namelist_rc_refcell(&self) -> Rc<RefCell<Vec<String>>> {
Rc::clone(&self.namelist)
pub(crate) fn namelist_rc_refcell(&self) -> Arc<RwLock<Vec<String>>> {
Arc::clone(&self.namelist)
}

pub(crate) fn formula_count(&self) -> usize {
@@ -379,8 +400,8 @@ impl AdfParser<'_> {
.map(|name| {
*self
.dict
.as_ref()
.borrow()
.read()
.expect("Failed to gain read lock")
.get(name)
.expect("Dictionary should contain all the used formulanames")
})