mirror of https://github.com/ellmau/adf-obdd.git synced 2025-12-19 09:29:36 +01:00

Feature/issue 39 counting model improvements (#42)

* Add more efficient construction of 2-val models with counting
* Increased patch-number of the version
Authored by Stefan Ellmauthaler on 2022-04-06 13:43:58 +02:00, committed by GitHub
parent 02dc37cbbf
commit 7e66d89d03
7 changed files with 295 additions and 58 deletions

Cargo.lock (generated)

@@ -4,7 +4,7 @@ version = 3
 [[package]]
 name = "adf_bdd"
-version = "0.2.2"
+version = "0.2.3"
 dependencies = [
  "biodivine-lib-bdd",
  "derivative",
@@ -21,7 +21,7 @@ dependencies = [
 [[package]]
 name = "adf_bdd-solver"
-version = "0.2.1"
+version = "0.2.3"
 dependencies = [
  "adf_bdd",
  "assert_cmd",

Cargo.toml

@@ -1,3 +1,7 @@
 [workspace]
 members=[ "lib", "bin" ]
 default-members = [ "lib" ]
+
+[profile.release]
+lto = "fat"
+codegen-units = 1

bin/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "adf_bdd-solver"
-version = "0.2.1"
+version = "0.2.3"
 authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
 edition = "2021"
 license = "GPL-3.0-only"

lib/Cargo.toml

@@ -1,6 +1,6 @@
 [package]
 name = "adf_bdd"
-version = "0.2.2"
+version = "0.2.3"
 authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
 edition = "2021"
 repository = "https://github.com/ellmau/adf-obdd/"


@@ -1,7 +1,7 @@
 /*!
 This module describes the abstract dialectical framework
-- computing interpretations
+- computing interpretations and models
 - computing fixpoints
 */
@@ -388,20 +388,52 @@ impl Adf {
     }

     fn two_val_model_counts(&mut self, interpr: &[Term]) -> Vec<Vec<Term>> {
-        log::trace!("two_val_model_counts({:?}) called ", interpr);
+        self.two_val_model_counts_logic(interpr, &vec![Term::UND; interpr.len()], 0)
+    }
+
+    fn heuristic1(
+        &self,
+        lhs: (Var, Term),
+        rhs: (Var, Term),
+        interpr: &[Term],
+    ) -> std::cmp::Ordering {
+        match self
+            .bdd
+            .var_impact(rhs.0, interpr)
+            .cmp(&self.bdd.var_impact(lhs.0, interpr))
+        {
+            std::cmp::Ordering::Equal => match self
+                .bdd
+                .nacyc_var_impact(lhs.0, interpr)
+                .cmp(&self.bdd.nacyc_var_impact(rhs.0, interpr))
+            {
+                std::cmp::Ordering::Equal => self
+                    .bdd
+                    .paths(lhs.1, true)
+                    .minimum()
+                    .cmp(&self.bdd.paths(rhs.1, true).minimum()),
+                value => value,
+            },
+            value => value,
+        }
+    }
+
+    fn two_val_model_counts_logic(
+        &mut self,
+        interpr: &[Term],
+        will_be: &[Term],
+        depth: usize,
+    ) -> Vec<Vec<Term>> {
+        log::debug!("two_val_model_recursion_depth: {}/{}", depth, interpr.len());
         if let Some((idx, ac)) = interpr
             .iter()
             .enumerate()
-            .filter(|(_idx, val)| !val.is_truth_value())
-            .min_by(|(_idx_a, val_a), (_idx_b, val_b)| {
-                self.bdd
-                    .models(**val_a, true)
-                    .minimum()
-                    .cmp(&self.bdd.models(**val_b, true).minimum())
+            .filter(|(idx, val)| !(val.is_truth_value() || will_be[*idx].is_truth_value()))
+            .min_by(|(idx_a, val_a), (idx_b, val_b)| {
+                self.heuristic1((Var(*idx_a), **val_a), (Var(*idx_b), **val_b), interpr)
             })
         {
             let mut result = Vec::new();
-            let check_models = !self.bdd.models(*ac, true).more_models();
+            let check_models = !self.bdd.paths(*ac, true).more_models();
             log::trace!(
                 "Identified Var({}) with ac {:?} to be {}",
                 idx,
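The new heuristic1 picks the next undecided statement by a three-stage lexicographic comparison: prefer the variable with the larger var_impact, break ties by the smaller nacyc_var_impact, and finally by the smaller minimum of the two path counts. The nested matches above are equivalent to chaining std::cmp::Ordering::then_with; a minimal standalone sketch, where plain integers stand in for the Bdd queries and heuristic_sketch is a hypothetical name, not the crate's API:

    use std::cmp::Ordering;

    // Tuples stand in for the three Bdd queries heuristic1 consults:
    // (var_impact, nacyc_var_impact, minimum of the path counts).
    fn heuristic_sketch(lhs: (usize, usize, usize), rhs: (usize, usize, usize)) -> Ordering {
        rhs.0
            .cmp(&lhs.0) // reversed: a larger var_impact should sort first
            .then_with(|| lhs.1.cmp(&rhs.1)) // then the smaller nacyc_var_impact
            .then_with(|| lhs.2.cmp(&rhs.2)) // then the smaller minimum path count
    }

    fn main() {
        // Equal impact on both sides, so the second criterion decides.
        assert_eq!(heuristic_sketch((3, 1, 7), (3, 2, 0)), Ordering::Less);
    }

Because min_by keeps the element that compares as smaller, reversing the first comparison makes the statement with the highest impact win.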
@@ -417,15 +449,16 @@ impl Adf {
                 let res = negative
                     .iter()
                     .try_for_each(|var| {
-                        if new_int[var.value()].is_true() {
+                        if new_int[var.value()].is_true() || will_be[var.value()] == Term::TOP {
                             return Err(());
                         }
                         new_int[var.value()] = Term::BOT;
                         Ok(())
                     })
                     .and(positive.iter().try_for_each(|var| {
-                        if new_int[var.value()].is_truth_value()
-                            && !new_int[var.value()].is_true()
+                        if (new_int[var.value()].is_truth_value()
+                            && !new_int[var.value()].is_true())
+                            || will_be[var.value()] == Term::BOT
                         {
                             return Err(());
                         }
@@ -434,37 +467,81 @@ impl Adf {
                     }));
                 if res.is_ok() {
                     new_int[idx] = if check_models { Term::TOP } else { Term::BOT };
-                    let upd_int = self.update_interpretation(&new_int);
-                    result.append(&mut self.two_val_model_counts(&upd_int));
+                    let upd_int = self.update_interpretation_fixpoint(&new_int);
+                    if self.check_consistency(&upd_int, will_be) {
+                        result.append(&mut self.two_val_model_counts_logic(
+                            &upd_int,
+                            will_be,
+                            depth + 1,
+                        ));
+                    }
                 }
                 res
             });
+            log::trace!("results found so far:{}", result.len());
             // checked one alternative, we can now conclude that only the other option may work
-            log::trace!("checked one alternative, concluding the other value");
+            log::debug!("checked one alternative, concluding the other value");
             let new_int = interpr
                 .iter()
                 .map(|tree| self.bdd.restrict(*tree, Var(idx), !check_models))
                 .collect::<Vec<Term>>();
-            let mut upd_int = self.update_interpretation(&new_int);
-            // TODO: should be "must be true/false" instead of setting it to TOP/BOT and will need sanity checks at every iteration
+            let mut upd_int = self.update_interpretation_fixpoint(&new_int);
             log::trace!("\nnew_int {new_int:?}\nupd_int {upd_int:?}");
-            if new_int[idx].no_inf_decrease(&upd_int[idx]) {
+            if new_int[idx].no_inf_inconsistency(&upd_int[idx]) {
                 upd_int[idx] = if check_models { Term::BOT } else { Term::TOP };
-                if new_int[idx].no_inf_decrease(&upd_int[idx]) {
-                    result.append(&mut self.two_val_model_counts(&upd_int));
+                if new_int[idx].no_inf_inconsistency(&upd_int[idx]) {
+                    let mut must_be_new = will_be.to_vec();
+                    must_be_new[idx] = new_int[idx];
+                    result.append(&mut self.two_val_model_counts_logic(
+                        &upd_int,
+                        &must_be_new,
+                        depth + 1,
+                    ));
                 }
             }
             result
         } else {
             // filter has created empty iterator
-            vec![interpr.to_vec()]
+            let concluded = interpr
+                .iter()
+                .enumerate()
+                .map(|(idx, val)| {
+                    if !val.is_truth_value() {
+                        will_be[idx]
+                    } else {
+                        *val
+                    }
+                })
+                .collect::<Vec<Term>>();
+            let ac = self.ac.clone();
+            let result = self.apply_interpretation(&ac, &concluded);
+            if self.check_consistency(&result, &concluded) {
+                vec![result]
+            } else {
+                vec![interpr.to_vec()]
+            }
         }
     }
+
+    fn update_interpretation_fixpoint(&mut self, interpretation: &[Term]) -> Vec<Term> {
+        let mut cur_int = interpretation.to_vec();
+        loop {
+            let new_int = self.update_interpretation(interpretation);
+            if cur_int == new_int {
+                return cur_int;
+            } else {
+                cur_int = new_int;
+            }
+        }
+    }

     fn update_interpretation(&mut self, interpretation: &[Term]) -> Vec<Term> {
-        interpretation
-            .iter()
+        self.apply_interpretation(interpretation, interpretation)
+    }
+
+    fn apply_interpretation(&mut self, ac: &[Term], interpretation: &[Term]) -> Vec<Term> {
+        ac.iter()
             .map(|ac| {
                 interpretation
                     .iter()
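update_interpretation_fixpoint is meant to re-apply the one-step update until the interpretation stops changing. The general shape of such a loop, as a self-contained sketch over any equality-comparable state (a hypothetical generic helper, not the crate's types):

    // Iterate `update` until the state no longer changes.
    fn fixpoint<T: PartialEq>(start: T, update: impl Fn(&T) -> T) -> T {
        let mut cur = start;
        loop {
            let next = update(&cur);
            if next == cur {
                return cur;
            }
            cur = next;
        }
    }

    fn main() {
        // Halving reaches the fixpoint 0.
        assert_eq!(fixpoint(40u32, |x| x / 2), 0);
    }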
@@ -480,6 +557,13 @@ impl Adf {
             .collect::<Vec<Term>>()
     }

+    fn check_consistency(&mut self, interpretation: &[Term], will_be: &[Term]) -> bool {
+        interpretation
+            .iter()
+            .zip(will_be.iter())
+            .all(|(int, wb)| wb.no_inf_inconsistency(int))
+    }
+
     /// Computes the complete models
     /// Returns an Iterator which contains all complete models
     pub fn complete<'a, 'c>(&'a mut self) -> impl Iterator<Item = Vec<Term>> + 'c


@@ -73,7 +73,7 @@ impl Term {
     }

     /// Returns true if the information of *other* does not decrease and it is not inconsistent.
-    pub fn no_inf_decrease(&self, other: &Self) -> bool {
+    pub fn no_inf_inconsistency(&self, other: &Self) -> bool {
        if self.compare_inf(other) {
            return true;
        }
@@ -185,7 +185,7 @@ impl BddNode {
     }
 }

-/// Type alias for the pair of counter-models and models
+/// Represents the pair of counts, related to counter-models and models.
 #[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
 pub struct ModelCounts {
     /// Contains the number of counter-models
@@ -225,8 +225,8 @@ impl From<(usize, usize)> for ModelCounts {
         }
     }
 }

-/// Type alias for the Modelcounts and the depth of a given Node in a BDD
-pub type CountNode = (ModelCounts, usize);
+/// Type alias for the Modelcounts, Count of paths to bot resp top, and the depth of a given Node in a BDD
+pub type CountNode = (ModelCounts, ModelCounts, usize);
 /// Type alias for Facet counts, which contains number of facets and counter facets.
 pub type FacetCounts = (usize, usize);
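The cache entry per node is thus widened from a pair to a triple: model counts, path counts, and depth. A standalone illustration with plain tuples in place of ModelCounts/CountNode, using the values the new tests assert for formula4:

    // (counter-models, models) resp. (paths to BOT, paths to TOP)
    type CountsSketch = (usize, usize);
    type CountNodeSketch = (CountsSketch, CountsSketch, usize);

    fn main() {
        // formula4 in the new tests: 5 counter-models, 3 models,
        // 3 paths to BOT, 2 paths to TOP, depth 3.
        let formula4: CountNodeSketch = ((5, 3), (3, 2), 3);
        let (models, paths, depth) = formula4;
        println!("models {:?} paths {:?} depth {}", models, paths, depth);
    }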


@@ -51,11 +51,11 @@ impl Bdd {
         result
             .count_cache
             .borrow_mut()
-            .insert(Term::TOP, (ModelCounts::top(), 0));
+            .insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
         result
             .count_cache
             .borrow_mut()
-            .insert(Term::BOT, (ModelCounts::bot(), 0));
+            .insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
         result
     }
 }
@@ -228,22 +228,28 @@ impl Bdd {
             var_set.insert(var);
             self.var_deps.push(var_set);
         }
-        log::debug!("newterm: {} as {:?}", new_term, node);
         #[cfg(feature = "adhoccounting")]
         {
+            log::debug!("newterm: {} as {:?}", new_term, node);
             let mut count_cache = self.count_cache.borrow_mut();
-            let (lo_counts, lodepth) = *count_cache.get(&lo).expect("Cache corrupted");
-            let (hi_counts, hidepth) = *count_cache.get(&hi).expect("Cache corrupted");
+            let (lo_counts, lo_paths, lodepth) =
+                *count_cache.get(&lo).expect("Cache corrupted");
+            let (hi_counts, hi_paths, hidepth) =
+                *count_cache.get(&hi).expect("Cache corrupted");
             log::debug!(
-                "lo (cm: {}, mo: {}, dp: {})",
+                "lo (cm: {}, mo: {}, p-: {}, p+: {}, dp: {})",
                 lo_counts.cmodels,
                 lo_counts.models,
+                lo_paths.cmodels,
+                lo_paths.models,
                 lodepth
             );
             log::debug!(
-                "hi (cm: {}, mo: {}, dp: {})",
+                "hi (cm: {}, mo: {}, p-: {}, p+: {}, dp: {})",
                 hi_counts.cmodels,
                 hi_counts.models,
+                hi_paths.cmodels,
+                hi_paths.models,
                 hidepth
             );
             let (lo_exp, hi_exp) = if lodepth > hidepth {
@@ -260,6 +266,11 @@ impl Bdd {
                         lo_counts.models * lo_exp + hi_counts.models * hi_exp,
                     )
                         .into(),
+                    (
+                        lo_paths.cmodels + hi_paths.cmodels,
+                        lo_paths.models + hi_paths.models,
+                    )
+                        .into(),
                     std::cmp::max(lodepth, hidepth) + 1,
                 ),
             );
@@ -270,7 +281,7 @@ impl Bdd {
         }
     }

-    /// Computes the number of counter-models and models for a given BDD-tree
+    /// Computes the number of counter-models and models for a given BDD-tree.
     ///
     /// Use the flag `_memoization` to choose between using the memoization approach or not. (This flag does nothing if the feature `adhoccounting` is used)
     pub fn models(&self, term: Term, _memoization: bool) -> ModelCounts {
@@ -286,19 +297,56 @@ impl Bdd {
         }
     }

+    /// Computes the number of paths which lead to Bot respectively Top.
+    ///
+    /// Use the flag `_memoization` to choose between using the memoization approach or not. (This flag does nothing if the feature `adhoccounting` is used)
+    pub fn paths(&self, term: Term, _memoization: bool) -> ModelCounts {
+        #[cfg(feature = "adhoccounting")]
+        {
+            return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").1;
+        }
+        #[cfg(not(feature = "adhoccounting"))]
+        if _memoization {
+            self.modelcount_memoization(term).1
+        } else {
+            self.modelcount_naive(term).1
+        }
+    }
+
+    /// Computes the maximal depth of the given sub-tree.
+    #[allow(dead_code)] // max depth may be used in future heuristics
+    pub fn max_depth(&self, term: Term) -> usize {
+        #[cfg(feature = "adhoccounting")]
+        {
+            return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").2;
+        }
+        #[cfg(not(feature = "adhoccounting"))]
+        match self.count_cache.borrow().get(&term) {
+            Some((_mc, _pc, depth)) => *depth,
+            None => {
+                if term.is_truth_value() {
+                    0
+                } else {
+                    self.max_depth(self.nodes[term.0].hi())
+                        .max(self.max_depth(self.nodes[term.0].lo()))
+                }
+            }
+        }
+    }
+
     #[allow(dead_code)] // dead code due to more efficient ad-hoc building, still used for a couple of tests
     /// Computes the number of counter-models, models, and variables for a given BDD-tree
     fn modelcount_naive(&self, term: Term) -> CountNode {
         if term == Term::TOP {
-            (ModelCounts::top(), 0)
+            (ModelCounts::top(), ModelCounts::top(), 0)
         } else if term == Term::BOT {
-            (ModelCounts::bot(), 0)
+            (ModelCounts::bot(), ModelCounts::bot(), 0)
         } else {
             let node = &self.nodes[term.0];
             let mut lo_exp = 0u32;
             let mut hi_exp = 0u32;
-            let (lo_counts, lodepth) = self.modelcount_naive(node.lo());
-            let (hi_counts, hidepth) = self.modelcount_naive(node.hi());
+            let (lo_counts, lo_paths, lodepth) = self.modelcount_naive(node.lo());
+            let (hi_counts, hi_paths, hidepth) = self.modelcount_naive(node.hi());
             if lodepth > hidepth {
                 hi_exp = (lodepth - hidepth) as u32;
             } else {
@@ -310,6 +358,11 @@ impl Bdd {
                     lo_counts.models * 2usize.pow(lo_exp) + hi_counts.models * 2usize.pow(hi_exp),
                 )
                     .into(),
+                (
+                    lo_paths.cmodels + hi_paths.cmodels,
+                    lo_paths.models + hi_paths.models,
+                )
+                    .into(),
                 std::cmp::max(lodepth, hidepth) + 1,
             )
         }
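The ad-hoc cache, modelcount_naive, and modelcount_memoization all combine the two children the same way: model counts of the shallower child are scaled by 2 to the power of the depth difference (the skipped variables are free), while the new path counts are simply added. A self-contained sketch of that arithmetic, where Counts and the combine functions are hypothetical stand-ins for the crate's ModelCounts handling:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct Counts {
        cmodels: usize, // counter-models resp. paths to BOT
        models: usize,  // models resp. paths to TOP
    }

    // Model counts: scale the shallower child by 2^(depth difference).
    fn combine_models(lo: Counts, hi: Counts, lodepth: usize, hidepth: usize) -> Counts {
        let (lo_exp, hi_exp) = if lodepth > hidepth {
            (0u32, (lodepth - hidepth) as u32)
        } else {
            ((hidepth - lodepth) as u32, 0u32)
        };
        Counts {
            cmodels: lo.cmodels * 2usize.pow(lo_exp) + hi.cmodels * 2usize.pow(hi_exp),
            models: lo.models * 2usize.pow(lo_exp) + hi.models * 2usize.pow(hi_exp),
        }
    }

    // Path counts: no scaling, every path to a leaf is counted once.
    fn combine_paths(lo: Counts, hi: Counts) -> Counts {
        Counts {
            cmodels: lo.cmodels + hi.cmodels,
            models: lo.models + hi.models,
        }
    }

    fn main() {
        let top = Counts { cmodels: 0, models: 1 };
        let bot = Counts { cmodels: 1, models: 0 };
        // A single-variable node with lo = BOT and hi = TOP.
        assert_eq!(combine_models(bot, top, 0, 0), Counts { cmodels: 1, models: 1 });
        assert_eq!(combine_paths(bot, top), Counts { cmodels: 1, models: 1 });
    }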
@@ -317,9 +370,9 @@ impl Bdd {
     fn modelcount_memoization(&self, term: Term) -> CountNode {
         if term == Term::TOP {
-            (ModelCounts::top(), 0)
+            (ModelCounts::top(), ModelCounts::top(), 0)
         } else if term == Term::BOT {
-            (ModelCounts::bot(), 0)
+            (ModelCounts::bot(), ModelCounts::bot(), 0)
         } else {
             if let Some(result) = self.count_cache.borrow().get(&term) {
                 return *result;
@@ -328,8 +381,8 @@ impl Bdd {
             let node = &self.nodes[term.0];
             let mut lo_exp = 0u32;
             let mut hi_exp = 0u32;
-            let (lo_counts, lodepth) = self.modelcount_memoization(node.lo());
-            let (hi_counts, hidepth) = self.modelcount_memoization(node.hi());
+            let (lo_counts, lo_paths, lodepth) = self.modelcount_memoization(node.lo());
+            let (hi_counts, hi_paths, hidepth) = self.modelcount_memoization(node.hi());
             if lodepth > hidepth {
                 hi_exp = (lodepth - hidepth) as u32;
             } else {
@@ -343,6 +396,11 @@ impl Bdd {
                         + hi_counts.models * 2usize.pow(hi_exp),
                 )
                     .into(),
+                (
+                    lo_paths.cmodels + hi_paths.cmodels,
+                    lo_paths.models + hi_paths.models,
+                )
+                    .into(),
                 std::cmp::max(lodepth, hidepth) + 1,
             )
         };
@@ -358,10 +416,10 @@ impl Bdd {
         {
             self.count_cache
                 .borrow_mut()
-                .insert(Term::TOP, (ModelCounts::top(), 0));
+                .insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
             self.count_cache
                 .borrow_mut()
-                .insert(Term::BOT, (ModelCounts::bot(), 0));
+                .insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
             for i in 0..self.nodes.len() {
                 log::debug!("fixing Term({})", i);
                 self.modelcount_memoization(Term(i));
@@ -405,6 +463,29 @@ impl Bdd {
             var_set
         }
     }
+
+    pub fn var_impact(&self, var: Var, termlist: &[Term]) -> usize {
+        termlist.iter().fold(0usize, |acc, val| {
+            if self.var_dependencies(*val).contains(&var) {
+                acc + 1
+            } else {
+                acc
+            }
+        })
+    }
+
+    pub fn nacyc_var_impact(&self, var: Var, termlist: &[Term]) -> usize {
+        (0..termlist.len()).into_iter().fold(0usize, |acc, idx| {
+            if self
+                .var_dependencies(termlist[var.value()])
+                .contains(&Var(idx))
+            {
+                acc + 1
+            } else {
+                acc
+            }
+        })
+    }
 }

 #[cfg(test)]
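var_impact counts in how many terms of the given list the variable occurs at all (via var_dependencies); heuristic1 then prefers variables that influence many acceptance conditions. A standalone sketch with plain dependency sets in place of the Bdd, mirroring the numbers of the new var_impact test below:

    use std::collections::HashSet;

    // Dependency sets stand in for Bdd::var_dependencies of each term.
    fn var_impact(var: usize, deps: &[HashSet<usize>]) -> usize {
        deps.iter().filter(|d| d.contains(&var)).count()
    }

    fn main() {
        let deps = vec![
            HashSet::from([0, 1]), // formula1 = v1 and v2
            HashSet::from([0, 1]), // formula2 = v1 or v2
            HashSet::from([2]),    // v3
        ];
        assert_eq!(var_impact(0, &deps), 2);
        assert_eq!(var_impact(1, &deps), 2);
        assert_eq!(var_impact(2, &deps), 1);
    }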
@@ -554,13 +635,46 @@ mod test {
         assert_eq!(bdd.models(Term::TOP, true), (0, 1).into());
         assert_eq!(bdd.models(Term::BOT, true), (1, 0).into());
-        assert_eq!(bdd.modelcount_naive(v1), ((1, 1).into(), 1));
-        assert_eq!(bdd.modelcount_naive(formula1), ((3, 1).into(), 2));
-        assert_eq!(bdd.modelcount_naive(formula2), ((1, 3).into(), 2));
-        assert_eq!(bdd.modelcount_naive(formula3), ((2, 2).into(), 2));
-        assert_eq!(bdd.modelcount_naive(formula4), ((5, 3).into(), 3));
-        assert_eq!(bdd.modelcount_naive(Term::TOP), ((0, 1).into(), 0));
-        assert_eq!(bdd.modelcount_naive(Term::BOT), ((1, 0).into(), 0));
+        assert_eq!(bdd.paths(formula1, false), (2, 1).into());
+        assert_eq!(bdd.paths(formula2, false), (1, 2).into());
+        assert_eq!(bdd.paths(formula3, false), (2, 2).into());
+        assert_eq!(bdd.paths(formula4, false), (3, 2).into());
+        assert_eq!(bdd.paths(Term::TOP, false), (0, 1).into());
+        assert_eq!(bdd.paths(Term::BOT, false), (1, 0).into());
+
+        assert_eq!(bdd.paths(v1, true), (1, 1).into());
+        assert_eq!(bdd.paths(formula1, true), (2, 1).into());
+        assert_eq!(bdd.paths(formula2, true), (1, 2).into());
+        assert_eq!(bdd.paths(formula3, true), (2, 2).into());
+        assert_eq!(bdd.paths(formula4, true), (3, 2).into());
+        assert_eq!(bdd.paths(Term::TOP, true), (0, 1).into());
+        assert_eq!(bdd.paths(Term::BOT, true), (1, 0).into());
+
+        assert_eq!(bdd.modelcount_naive(v1), ((1, 1).into(), (1, 1).into(), 1));
+        assert_eq!(
+            bdd.modelcount_naive(formula1),
+            ((3, 1).into(), (2, 1).into(), 2)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(formula2),
+            ((1, 3).into(), (1, 2).into(), 2)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(formula3),
+            ((2, 2).into(), (2, 2).into(), 2)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(formula4),
+            ((5, 3).into(), (3, 2).into(), 3)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(Term::TOP),
+            ((0, 1).into(), (0, 1).into(), 0)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(Term::BOT),
+            ((1, 0).into(), (1, 0).into(), 0)
+        );

         assert_eq!(
             bdd.modelcount_naive(formula4),
@@ -588,6 +702,11 @@ mod test {
             bdd.modelcount_naive(Term::BOT),
             bdd.modelcount_memoization(Term::BOT)
         );
+
+        assert_eq!(bdd.max_depth(Term::BOT), 0);
+        assert_eq!(bdd.max_depth(v1), 1);
+        assert_eq!(bdd.max_depth(formula3), 2);
+        assert_eq!(bdd.max_depth(formula4), 3);
     }

     #[cfg(feature = "variablelist")]
@@ -617,6 +736,36 @@ mod test {
             .for_each(|(left, right)| {
                 assert!(left == right);
             });
+        assert_eq!(
+            bdd.var_impact(Var(0), &[formula1, formula2, formula3, formula4]),
+            4
+        );
+        assert_eq!(
+            bdd.var_impact(Var(2), &[formula1, formula2, formula3, formula4]),
+            1
+        );
+        assert_eq!(bdd.var_impact(Var(2), &[formula1, formula2, formula3]), 0);
+    }
+
+    #[test]
+    fn var_impact() {
+        let mut bdd = Bdd::new();
+        let v1 = bdd.variable(Var(0));
+        let v2 = bdd.variable(Var(1));
+        let v3 = bdd.variable(Var(2));
+        let formula1 = bdd.and(v1, v2);
+        let formula2 = bdd.or(v1, v2);
+        let ac: Vec<Term> = vec![formula1, formula2, v3];
+        assert_eq!(bdd.var_impact(Var(0), &ac), 2);
+        assert_eq!(bdd.var_impact(Var(1), &ac), 2);
+        assert_eq!(bdd.var_impact(Var(2), &ac), 1);
+        assert_eq!(bdd.nacyc_var_impact(Var(0), &ac), 2);
+        assert_eq!(bdd.nacyc_var_impact(Var(2), &ac), 1);
     }

     #[test]