mirror of https://github.com/ellmau/adf-obdd.git synced 2025-12-19 09:29:36 +01:00

Feature/issue 17 biodivine (#18)

A state-of-the-art BDD library (biodivine), the naive implementation, and a hybrid approach are available in the first beta release, version 0.2.0

* ADD biodivine as an obdd library to adf-obdd

* Implement restrict-wrapper on Biodivine

* Test cases changed to use biodivine for the grounded semantics.

* API unified

* ADD stable and complete with biodivine

* moved main.rs to bin folder

* ADD biodivine -> naive translation

* ADD hybrid approach (see the sketch after this list):
  instantiation and grounded semantics via biodivine, then the naive approach with memoization

* TIDY Readme, doc, tests
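
For orientation, a rough sketch of the hybrid pipeline as it is wired up in `src/bin/adf_bdd.rs` below (the function name is illustrative and error handling is elided):

```rust
use adf_bdd::adfbiodivine::Adf as BdAdf;
use adf_bdd::parser::AdfParser;

fn hybrid_pipeline(input: &str) {
    let parser = AdfParser::default();
    parser.parse()(input).expect("parsing failed");
    // instantiation and grounded semantics via the biodivine BDD library
    let bd_adf = BdAdf::from_parser(&parser);
    let grounded = bd_adf.grounded();
    print!("{}", bd_adf.print_interpretation(&grounded));
    // hand over to the naive (memoizing) implementation for the remaining semantics
    let mut naive_adf = bd_adf.hybrid_step();
    let printer = naive_adf.print_dictionary();
    for model in naive_adf.stable() {
        print!("{}", printer.print_interpretation(&model));
    }
}
```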
Stefan Ellmauthaler 2022-01-27 12:23:12 +01:00 committed by GitHub
parent 864419d033
commit 1edfe43985
16 changed files with 1473 additions and 291 deletions

Cargo.lock generated

@ -4,10 +4,11 @@ version = 3
[[package]] [[package]]
name = "adf_bdd" name = "adf_bdd"
version = "0.1.4" version = "0.2.0"
dependencies = [ dependencies = [
"assert_cmd", "assert_cmd",
"assert_fs", "assert_fs",
"biodivine-lib-bdd",
"clap", "clap",
"env_logger 0.9.0", "env_logger 0.9.0",
"lexical-sort", "lexical-sort",
@ -91,6 +92,16 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "biodivine-lib-bdd"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcfb460c1131729ec8d797ce0be49ec102506e764ce62f20a40056516851c5"
dependencies = [
"fxhash",
"rand 0.7.3",
]
[[package]] [[package]]
name = "bitflags" name = "bitflags"
version = "1.3.2" version = "1.3.2"
@ -108,6 +119,12 @@ dependencies = [
"regex-automata", "regex-automata",
] ]
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]] [[package]]
name = "cfg-if" name = "cfg-if"
version = "1.0.0" version = "1.0.0"
@ -195,6 +212,26 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fxhash"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
dependencies = [
"byteorder",
]
[[package]]
name = "getrandom"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
dependencies = [
"cfg-if",
"libc",
"wasi 0.9.0+wasi-snapshot-preview1",
]
[[package]] [[package]]
name = "getrandom" name = "getrandom"
version = "0.2.3" version = "0.2.3"
@ -203,7 +240,7 @@ checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"libc", "libc",
"wasi", "wasi 0.10.2+wasi-snapshot-preview1",
] ]
[[package]] [[package]]
@ -438,7 +475,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6"
dependencies = [ dependencies = [
"env_logger 0.8.4", "env_logger 0.8.4",
"log", "log",
"rand", "rand 0.8.4",
] ]
[[package]] [[package]]
@ -461,6 +498,19 @@ dependencies = [
"proc-macro2", "proc-macro2",
] ]
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom 0.1.16",
"libc",
"rand_chacha 0.2.2",
"rand_core 0.5.1",
"rand_hc 0.2.0",
]
[[package]] [[package]]
name = "rand" name = "rand"
version = "0.8.4" version = "0.8.4"
@ -468,9 +518,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = [ dependencies = [
"libc", "libc",
"rand_chacha", "rand_chacha 0.3.1",
"rand_core", "rand_core 0.6.3",
"rand_hc", "rand_hc 0.3.1",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core 0.5.1",
] ]
[[package]] [[package]]
@ -480,7 +540,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [ dependencies = [
"ppv-lite86", "ppv-lite86",
"rand_core", "rand_core 0.6.3",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom 0.1.16",
] ]
[[package]] [[package]]
@ -489,7 +558,16 @@ version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [ dependencies = [
"getrandom", "getrandom 0.2.3",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core 0.5.1",
] ]
[[package]] [[package]]
@ -498,7 +576,7 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = [ dependencies = [
"rand_core", "rand_core 0.6.3",
] ]
[[package]] [[package]]
@ -637,7 +715,7 @@ checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"libc", "libc",
"rand", "rand 0.8.4",
"redox_syscall", "redox_syscall",
"remove_dir_all", "remove_dir_all",
"winapi", "winapi",
@ -737,6 +815,12 @@ dependencies = [
"winapi-util", "winapi-util",
] ]
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]] [[package]]
name = "wasi" name = "wasi"
version = "0.10.2+wasi-snapshot-preview1" version = "0.10.2+wasi-snapshot-preview1"


@ -1,18 +1,19 @@
[package] [package]
name = "adf_bdd" name = "adf_bdd"
version = "0.1.4" version = "0.2.0"
authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"] authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
edition = "2021" edition = "2021"
repository = "https://github.com/ellmau/adf-obdd/" repository = "https://github.com/ellmau/adf-obdd/"
license = "GPL-3.0-only" license = "GPL-3.0-only"
exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml"] exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml"]
description = "Solver for ADFs grounded semantics by utilising OBDDs - ordered binary decision diagrams" description = "Solver for ADFs grounded, complete, and stable semantics by utilising OBDDs - ordered binary decision diagrams"
build = "build.rs" build = "build.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[[bin]]
name="adf_bdd"
[dependencies] [dependencies]
clap = "2.33.*" clap = "2.33.*"
structopt = "0.3.25" structopt = "0.3.25"
@ -22,6 +23,7 @@ nom = "7.1.0"
lexical-sort = "0.3.1" lexical-sort = "0.3.1"
serde = { version = "1.0", features = ["derive","rc"] } serde = { version = "1.0", features = ["derive","rc"] }
serde_json = "1.0" serde_json = "1.0"
biodivine-lib-bdd = "0.3.0"
[dev-dependencies] [dev-dependencies]
test-log = "0.2.*" test-log = "0.2.*"


@ -7,6 +7,48 @@ An abstract dialectical framework (ADF) consists of abstract statements. Each st
## Ordered Binary Decision Diagram
An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability and validity checks can be done relatively cheaply.
## Usage
```
USAGE:
adf_bdd [FLAGS] [OPTIONS] <input>
FLAGS:
--com Compute the complete models
--grd Compute the grounded model
-h, --help Prints help information
--import Import an adf-bdd state instead of an adf
-q Sets log verbosity to only errors
--an Sorts variables in an alphanumeric manner
--lx Sorts variables in a lexicographic manner
--stm Compute the stable models
-V, --version Prints version information
-v Sets log verbosity (multiple times means more verbose)
OPTIONS:
--export <export> Export the adf-bdd state after parsing and BDD instantiation to the given filename
--lib <implementation> Choose the BDD implementation: either 'biodivine', 'naive', or 'hybrid' [default: biodivine]
--rust_log <rust-log> Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not used [env: RUST_LOG=debug]
ARGS:
<input> Input filename
```
Note that import and export only work if the naive library is chosen.
Right now the computed models carry no additional labels, so if you use --com --grd --stm the borders between the results are not explicitly marked (see the example invocation after the list below).
They can be easily identified though:
- The computation is always in the same order
- grd
- com
- stm
- We know that there is always exactly one grounded model
- We know that there always exists at least one complete model (i.e. the grounded one)
- We know that a stable model does not need to exist
- We know that every stable model is a complete model too
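
For example, an invocation that computes the grounded and stable models with the hybrid back end, and one that exports the BDD state with the naive back end, might look like this (the filenames are placeholders):

```bash
# grounded and stable models via the hybrid back end
$> adf_bdd --lib hybrid --grd --stm my_instance.adf
# naive back end, exporting the state for a later --import run
$> adf_bdd --lib naive --grd --export state.json my_instance.adf
```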
## Input-file format:
Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows (an example instance is shown at the end of this section):
@ -35,3 +77,17 @@ $> cargo test
```
Note that some of the instances are quite big and it might take some time to finish all the tests.
If you do not initialise the submodule, tests will "only" run on the other unit-tests and (possibly forthcoming) other integration tests.
Due to the way the test modules are generated, you need to call
```bash
$> cargo clean
```
if you change some of the test cases.
To remove the tests just type
```bash
$> git submodule deinit res/adf-instances
```
or
```bash
$> git submodule deinit --all
```
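
To illustrate the input format described above, here is one of the small instances used by the test cases in this commit:

```
s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).
```

It declares two statements a and b, each accepted exactly if the other is rejected; its two stable models set exactly one of them to true.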


@ -58,7 +58,7 @@ fn write_header(test_file: &mut File) {
write!( write!(
test_file, test_file,
r#" r#"
use adf_bdd::adf::Adf; use adf_bdd::adfbiodivine::Adf;
use adf_bdd::parser::AdfParser; use adf_bdd::parser::AdfParser;
use test_log::test; use test_log::test;
"# "#


@ -1,16 +1,14 @@
//! This module describes the abstract dialectical framework //! This module describes the abstract dialectical framework
//! //!
//! It handles
//! - parsing of statements and acceptance functions
//! - computing interpretations //! - computing interpretations
//! - computing fixpoints //! - computing fixpoints
//! - computing the least fixpoint by using a shortcut
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::{ use crate::{
datatypes::{ datatypes::{
adf::{ adf::{
PrintableInterpretation, ThreeValuedInterpretationsIterator, PrintDictionary, PrintableInterpretation, ThreeValuedInterpretationsIterator,
TwoValuedInterpretationsIterator, VarContainer, TwoValuedInterpretationsIterator, VarContainer,
}, },
Term, Var, Term, Var,
@ -27,6 +25,16 @@ pub struct Adf {
ac: Vec<Term>, ac: Vec<Term>,
} }
impl Default for Adf {
fn default() -> Self {
Self {
ordering: VarContainer::default(),
bdd: Bdd::new(),
ac: Vec::new(),
}
}
}
impl Adf { impl Adf {
/// Instantiates a new ADF, based on the parser-data /// Instantiates a new ADF, based on the parser-data
pub fn from_parser(parser: &AdfParser) -> Self { pub fn from_parser(parser: &AdfParser) -> Self {
@ -65,6 +73,66 @@ impl Adf {
result result
} }
pub(crate) fn from_biodivine_vector(
ordering: &VarContainer,
bio_ac: &[biodivine_lib_bdd::Bdd],
) -> Self {
let mut result = Self {
ordering: VarContainer::copy(ordering),
bdd: Bdd::new(),
ac: vec![Term(0); bio_ac.len()],
};
result
.ac
.iter_mut()
.zip(bio_ac.iter())
.for_each(|(new_ac, bdd_ac)| {
if bdd_ac.is_true() {
*new_ac = Bdd::constant(true);
} else if bdd_ac.is_false() {
*new_ac = Bdd::constant(false);
} else {
// compound formula
let mut term_vec: Vec<Term> = Vec::new();
for (idx, tuple) in bdd_ac
.to_string()
.split('|')
.filter(|tuple| !tuple.is_empty())
.enumerate()
{
let node_elements = tuple.split(',').collect::<Vec<&str>>();
if idx == 0 {
term_vec.push(Bdd::constant(false));
} else if idx == 1 {
term_vec.push(Bdd::constant(true));
} else {
let new_term = result.bdd.node(
Var(node_elements[0]
.parse::<usize>()
.expect("Var should be number")),
term_vec[node_elements[1]
.parse::<usize>()
.expect("Termpos should be a valid number")],
term_vec[node_elements[2]
.parse::<usize>()
.expect("Termpos should be a valid number")],
);
term_vec.push(new_term);
}
*new_ac = *term_vec
.last()
.expect("There should be one element in the vector");
}
}
});
result
}
/// Instantiates a new ADF, based on a biodivine adf
pub fn from_biodivine(bio_adf: &super::adfbiodivine::Adf) -> Self {
Self::from_biodivine_vector(bio_adf.var_container(), bio_adf.ac())
}
fn term(&mut self, formula: &Formula) -> Term { fn term(&mut self, formula: &Formula) -> Term {
match formula { match formula {
Formula::Bot => Bdd::constant(false), Formula::Bot => Bdd::constant(false),
@ -107,12 +175,14 @@ impl Adf {
/// Computes the grounded extension and returns it as a list /// Computes the grounded extension and returns it as a list
pub fn grounded(&mut self) -> Vec<Term> { pub fn grounded(&mut self) -> Vec<Term> {
log::info!("[Start] grounded");
let ac = &self.ac.clone(); let ac = &self.ac.clone();
self.grounded_internal(ac) let result = self.grounded_internal(ac);
log::info!("[Done] grounded");
result
} }
fn grounded_internal(&mut self, interpretation: &[Term]) -> Vec<Term> { fn grounded_internal(&mut self, interpretation: &[Term]) -> Vec<Term> {
log::info!("[Start] grounded");
let mut t_vals: usize = interpretation let mut t_vals: usize = interpretation
.iter() .iter()
.filter(|elem| elem.is_truth_value()) .filter(|elem| elem.is_truth_value())
@ -149,36 +219,17 @@ impl Adf {
break; break;
} }
} }
log::info!("[Done] grounded");
new_interpretation new_interpretation
} }
/// Computes the first `max_values` stable models
/// if max_values is 0, then all will be computed
pub fn stable(&mut self, max_values: usize) -> Vec<Vec<Term>> {
let grounded = self.grounded();
if max_values == 0 {
self.stable_iter(&grounded).collect()
} else {
self.stable_iter(&grounded)
.enumerate()
.take_while(|(idx, _elem)| *idx < max_values)
.map(|(_, elem)| elem)
.collect()
}
}
/// Computes the stable models /// Computes the stable models
/// Returns an Iterator which contains all stable models /// Returns an Iterator which contains all stable models
fn stable_iter<'a, 'b, 'c>( pub fn stable<'a, 'c>(&'a mut self) -> impl Iterator<Item = Vec<Term>> + 'c
&'a mut self,
grounded: &'b [Term],
) -> impl Iterator<Item = Vec<Term>> + 'c
where where
'a: 'c, 'a: 'c,
'b: 'c,
{ {
TwoValuedInterpretationsIterator::new(grounded) let grounded = self.grounded();
TwoValuedInterpretationsIterator::new(&grounded)
.map(|interpretation| { .map(|interpretation| {
let mut interpr = self.ac.clone(); let mut interpr = self.ac.clone();
for ac in interpr.iter_mut() { for ac in interpr.iter_mut() {
@ -209,33 +260,15 @@ impl Adf {
.map(|(int, _grd)| int) .map(|(int, _grd)| int)
} }
/// Computes the first `max_values` stable models
/// if max_values is 0, then all will be computed
pub fn complete(&mut self, max_values: usize) -> Vec<Vec<Term>> {
let grounded = self.grounded();
if max_values == 0 {
self.complete_iter(&grounded).collect()
} else {
self.complete_iter(&grounded)
.enumerate()
.take_while(|(idx, _elem)| *idx < max_values)
.map(|(_, elem)| elem)
.collect()
}
}
/// Computes the complete models /// Computes the complete models
/// Returns an Iterator which contains all complete models /// Returns an Iterator which contains all complete models
fn complete_iter<'a, 'b, 'c>( pub fn complete<'a, 'c>(&'a mut self) -> impl Iterator<Item = Vec<Term>> + 'c
&'a mut self,
grounded: &'b [Term],
) -> impl Iterator<Item = Vec<Term>> + 'c
where where
'a: 'c, 'a: 'c,
'b: 'c,
{ {
let grounded = self.grounded();
let ac = self.ac.clone(); let ac = self.ac.clone();
ThreeValuedInterpretationsIterator::new(grounded).filter(move |interpretation| { ThreeValuedInterpretationsIterator::new(&grounded).filter(move |interpretation| {
interpretation.iter().enumerate().all(|(ac_idx, it)| { interpretation.iter().enumerate().all(|(ac_idx, it)| {
log::trace!("idx [{}], term: {}", ac_idx, it); log::trace!("idx [{}], term: {}", ac_idx, it);
it.compare_inf(&interpretation.iter().enumerate().fold( it.compare_inf(&interpretation.iter().enumerate().fold(
@ -262,6 +295,11 @@ impl Adf {
{ {
PrintableInterpretation::new(interpretation, &self.ordering) PrintableInterpretation::new(interpretation, &self.ordering)
} }
/// creates a [PrintDictionary] for output purposes
pub fn print_dictionary(&self) -> PrintDictionary {
PrintDictionary::new(&self.ordering)
}
} }
#[cfg(test)] #[cfg(test)]
@ -358,53 +396,42 @@ mod test {
.unwrap(); .unwrap();
let mut adf = Adf::from_parser(&parser); let mut adf = Adf::from_parser(&parser);
let mut stable = adf.stable();
assert_eq!( assert_eq!(
adf.stable(0), stable.next(),
vec![vec![ Some(vec![
Term::TOP, Term::TOP,
Term::BOT, Term::BOT,
Term::BOT, Term::BOT,
Term::TOP, Term::TOP,
Term::BOT, Term::BOT,
Term::TOP Term::TOP
]] ])
);
assert_eq!(
adf.stable(10),
vec![vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
]]
); );
assert_eq!(stable.next(), None);
let parser = AdfParser::default(); let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap(); parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap();
let mut adf = Adf::from_parser(&parser); let mut adf = Adf::from_parser(&parser);
let mut stable = adf.stable();
assert_eq!(adf.stable(1), vec![vec![Term::BOT, Term::TOP]]); assert_eq!(stable.next(), Some(vec![Term::BOT, Term::TOP]));
assert_eq!(adf.stable(2), adf.stable(0)); assert_eq!(stable.next(), Some(vec![Term::TOP, Term::BOT]));
assert_eq!( assert_eq!(stable.next(), None);
adf.stable(0),
vec![vec![Term::BOT, Term::TOP], vec![Term::TOP, Term::BOT]]
);
let parser = AdfParser::default(); let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,b).ac(b,a).").unwrap(); parser.parse()("s(a).s(b).ac(a,b).ac(b,a).").unwrap();
let mut adf = Adf::from_parser(&parser); let mut adf = Adf::from_parser(&parser);
assert_eq!(adf.stable(0), vec![vec![Term::BOT, Term::BOT]]); assert_eq!(
adf.stable().collect::<Vec<_>>(),
vec![vec![Term::BOT, Term::BOT]]
);
let parser = AdfParser::default(); let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(a)).ac(b,a).").unwrap(); parser.parse()("s(a).s(b).ac(a,neg(a)).ac(b,a).").unwrap();
let mut adf = Adf::from_parser(&parser); let mut adf = Adf::from_parser(&parser);
assert_eq!(adf.stable().next(), None);
let empty: Vec<Vec<Term>> = Vec::new();
assert_eq!(adf.stable(0), empty);
assert_eq!(adf.stable(99999), empty);
} }
#[test] #[test]
@ -415,12 +442,12 @@ mod test {
let mut adf = Adf::from_parser(&parser); let mut adf = Adf::from_parser(&parser);
assert_eq!( assert_eq!(
adf.complete(1), adf.complete().next(),
vec![vec![Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)]] Some(vec![Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)])
); );
assert_eq!( assert_eq!(
adf.complete(0), adf.complete().collect::<Vec<_>>(),
[ [
[Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)], [Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)],
[Term(1), Term(1), Term(1), Term(0), Term(0), Term(1)], [Term(1), Term(1), Term(1), Term(0), Term(0), Term(1)],
@ -436,15 +463,16 @@ mod test {
.unwrap(); .unwrap();
let mut adf = Adf::from_parser(&parser); let mut adf = Adf::from_parser(&parser);
assert_eq!( assert_eq!(
adf.complete(0), adf.complete().collect::<Vec<_>>(),
[ [
[Term(1), Term(3), Term(3), Term(7)], [Term(1), Term(3), Term(3), Term(7)],
[Term(1), Term(1), Term(1), Term(0)], [Term(1), Term(1), Term(1), Term(0)],
[Term(1), Term(0), Term(0), Term(1)] [Term(1), Term(0), Term(0), Term(1)]
] ]
); );
for model in adf.complete(0) { let printer = adf.print_dictionary();
println!("{}", adf.print_interpretation(&model)); for model in adf.complete() {
println!("{}", printer.print_interpretation(&model));
} }
} }
} }
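
Since `stable` and `complete` now return iterators instead of taking a `max_values` argument, a caller that previously capped the number of models can use `Iterator::take`. A minimal sketch against the API shown above (the helper name is illustrative):

```rust
use adf_bdd::adf::Adf;
use adf_bdd::datatypes::Term;
use adf_bdd::parser::AdfParser;

// Illustrative helper: the old `stable(max_values)` call becomes `stable().take(max_values)`.
fn first_stable_models(input: &str, max_values: usize) -> Vec<Vec<Term>> {
    let parser = AdfParser::default();
    parser.parse()(input).expect("parsing failed");
    let mut adf = Adf::from_parser(&parser);
    // without `take`, the iterator yields all stable models
    adf.stable().take(max_values).collect()
}
```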

src/adfbiodivine.rs Normal file

@ -0,0 +1,467 @@
//! This module describes the abstract dialectical framework
//! utilising the biodivine-lib-bdd (see <https://github.com/sybila/biodivine-lib-bdd>) BDD implementation to compute the
//! - grounded
//! - stable
//! - complete
//! semantics of ADFs.
use crate::{
datatypes::{
adf::{
PrintDictionary, PrintableInterpretation, ThreeValuedInterpretationsIterator,
TwoValuedInterpretationsIterator, VarContainer,
},
Term,
},
parser::AdfParser,
};
use biodivine_lib_bdd::Bdd;
#[derive(Debug)]
/// Representation of an ADF, with an ordering and dictionary of statement <-> number relations, a list of acceptance conditions in biodivine BDD representation, and the variable list needed by biodivine
pub struct Adf {
ordering: VarContainer,
ac: Vec<Bdd>,
vars: Vec<biodivine_lib_bdd::BddVariable>,
}
impl Adf {
/// Instantiates a new ADF, based on the parser-data
pub fn from_parser(parser: &AdfParser) -> Self {
log::info!("[Start] instantiating BDD");
let mut bdd_var_builder = biodivine_lib_bdd::BddVariableSetBuilder::new();
let namelist = parser.namelist_rc_refcell().as_ref().borrow().clone();
let slice_vec: Vec<&str> = namelist.iter().map(<_>::as_ref).collect();
bdd_var_builder.make_variables(&slice_vec);
let bdd_variables = bdd_var_builder.build();
let mut result = Self {
ordering: VarContainer::from_parser(
parser.namelist_rc_refcell(),
parser.dict_rc_refcell(),
),
ac: vec![
bdd_variables.mk_false();
parser.namelist_rc_refcell().as_ref().borrow().len()
],
vars: bdd_variables.variables(),
};
log::trace!("variable order: {:?}", result.vars);
log::debug!("[Start] adding acs");
parser
.formula_order()
.iter()
.enumerate()
.for_each(|(insert_order, new_order)| {
log::trace!(
"Pos {}/{} formula {}, {:?}",
insert_order + 1,
parser.formula_count(),
new_order,
parser.ac_at(insert_order)
);
result.ac[*new_order] = bdd_variables
.eval_expression(&parser.ac_at(insert_order).unwrap().to_boolean_expr());
log::trace!("instantiated {}", result.ac[*new_order]);
});
log::info!("[Success] instantiated");
result
}
pub(crate) fn var_container(&self) -> &VarContainer {
&self.ordering
}
pub(crate) fn ac(&self) -> &[Bdd] {
&self.ac
}
/// Computes the grounded extension and returns it as a list
pub fn grounded(&self) -> Vec<Term> {
log::info!("[Start] grounded");
let ac = &self.ac.clone();
let result = self
.grounded_internal(ac)
.iter()
.map(|elem| elem.into())
.collect();
log::info!("[Done] grounded");
result
}
fn var_list(&self, interpretation: &[Bdd]) -> Vec<(biodivine_lib_bdd::BddVariable, bool)> {
self.vars
.iter()
.enumerate()
.filter(|(idx, _elem)| interpretation[*idx].is_truth_value())
.map(|(idx, elem)| (*elem, interpretation[idx].is_true()))
.collect::<Vec<(biodivine_lib_bdd::BddVariable, bool)>>()
}
fn var_list_from_term(
&self,
interpretation: &[Term],
) -> Vec<(biodivine_lib_bdd::BddVariable, bool)> {
self.vars
.iter()
.enumerate()
.filter(|(idx, _elem)| interpretation[*idx].is_truth_value())
.map(|(idx, elem)| (*elem, interpretation[idx].is_true()))
.collect::<Vec<(biodivine_lib_bdd::BddVariable, bool)>>()
}
pub(crate) fn grounded_internal(&self, interpretation: &[Bdd]) -> Vec<Bdd> {
let mut new_interpretation: Vec<Bdd> = interpretation.into();
loop {
let mut truth_extention: bool = false;
// let var_list: Vec<(biodivine_lib_bdd::BddVariable, bool)> = self
// .vars
// .iter()
// .enumerate()
// .filter(|(idx, _elem)| new_interpretation[*idx].is_truth_value())
// .map(|(idx, elem)| (*elem, new_interpretation[idx].is_true()))
// .collect();
let var_list = self.var_list(&new_interpretation);
log::trace!("var-list: {:?}", var_list);
for ac in new_interpretation
.iter_mut()
.filter(|elem| !elem.is_truth_value())
{
log::trace!("checking ac: {}", ac);
*ac = ac.restrict(&var_list);
if ac.is_truth_value() {
truth_extention = true;
}
}
if !truth_extention {
break;
}
}
new_interpretation
}
/// Computes the complete models
/// Returns an Iterator which contains all the complete models
pub fn complete<'a, 'b>(&'a self) -> impl Iterator<Item = Vec<Term>> + 'b
where
'a: 'b,
{
ThreeValuedInterpretationsIterator::from_bdd(&self.grounded_internal(&self.ac)).filter(
|terms| {
let var_list = self.var_list_from_term(terms);
self.ac
.iter()
.enumerate()
.all(|(idx, ac)| terms[idx].cmp_information(&ac.restrict(&var_list)))
},
)
}
/// Shifts the representation so that the naive approach can be used
pub fn hybrid_step(&self) -> crate::adf::Adf {
crate::adf::Adf::from_biodivine_vector(
self.var_container(),
&self.grounded_internal(self.ac()),
)
}
/// Computes the stable models
/// Returns an Iterator which contains all the stable models
pub fn stable<'a, 'b>(&'a self) -> impl Iterator<Item = Vec<Term>> + 'b
where
'a: 'b,
{
TwoValuedInterpretationsIterator::from_bdd(&self.grounded_internal(&self.ac)).filter(
|terms| {
let reduction_list = self
.vars
.iter()
.enumerate()
.filter_map(|(idx, elem)| {
if terms[idx].is_truth_value() && !terms[idx].is_true() {
Some((*elem, false))
} else {
None
}
})
.collect::<Vec<(biodivine_lib_bdd::BddVariable, bool)>>();
let reduct = self
.ac
.iter()
.map(|ac| ac.restrict(&reduction_list))
.collect::<Vec<_>>();
let complete = self.grounded_internal(&reduct);
terms
.iter()
.zip(complete.iter())
.all(|(left, right)| left.cmp_information(right))
},
)
}
/// creates a [PrintableInterpretation] for output purposes
pub fn print_interpretation<'a, 'b>(
&'a self,
interpretation: &'b [Term],
) -> PrintableInterpretation<'b>
where
'a: 'b,
{
PrintableInterpretation::new(interpretation, &self.ordering)
}
/// creates a [PrintDictionary] for output purposes
pub fn print_dictionary(&self) -> PrintDictionary {
PrintDictionary::new(&self.ordering)
}
}
/// Provides Adf-Specific operations on truth valuations
pub trait AdfOperations {
/// Returns `true` if the BDD is either valid or unsatisfiable
fn is_truth_value(&self) -> bool;
/// Compares whether the information between two given BDDs are the same
fn cmp_information(&self, other: &Self) -> bool;
}
impl AdfOperations for Bdd {
fn is_truth_value(&self) -> bool {
self.is_false() || self.is_true()
}
fn cmp_information(&self, other: &Self) -> bool {
self.is_truth_value() == other.is_truth_value() && self.is_true() == other.is_true()
}
}
/// Implementations of the restrict-operations on BDDs
pub trait BddRestrict {
/// Provides an implementation of the restrict-operation on BDDs for one variable
fn var_restrict(&self, variable: biodivine_lib_bdd::BddVariable, value: bool) -> Self;
/// Provides an implementation of the restrict-operation on a set of variables
fn restrict(&self, variables: &[(biodivine_lib_bdd::BddVariable, bool)]) -> Self;
}
impl BddRestrict for Bdd {
fn var_restrict(&self, variable: biodivine_lib_bdd::BddVariable, value: bool) -> Bdd {
self.var_select(variable, value).var_project(variable)
}
fn restrict(&self, variables: &[(biodivine_lib_bdd::BddVariable, bool)]) -> Bdd {
let mut variablelist: Vec<biodivine_lib_bdd::BddVariable> = Vec::new();
variables
.iter()
.for_each(|(var, _val)| variablelist.push(*var));
self.select(variables).project(&variablelist)
}
}
impl ThreeValuedInterpretationsIterator {
fn from_bdd(bdd: &[Bdd]) -> Self {
let terms = bdd.iter().map(|value| value.into()).collect::<Vec<_>>();
Self::new(&terms)
}
}
impl TwoValuedInterpretationsIterator {
fn from_bdd(bdd: &[Bdd]) -> Self {
let terms = bdd.iter().map(|value| value.into()).collect::<Vec<_>>();
Self::new(&terms)
}
}
#[cfg(test)]
mod test {
use super::*;
use test_log::test;
use biodivine_lib_bdd::*;
#[test]
fn biodivine_internals() {
let mut builder = BddVariableSetBuilder::new();
let [_a, _b, _c] = builder.make(&["a", "b", "c"]);
let variables: BddVariableSet = builder.build();
let a = variables.eval_expression_string("a");
let b = variables.eval_expression_string("b");
let c = variables.eval_expression_string("c");
let d = variables.eval_expression_string("a & b & c");
let e = variables.eval_expression_string("a ^ b");
let t = variables.eval_expression(&boolean_expression::BooleanExpression::Const(true));
let f = variables.eval_expression(&boolean_expression::BooleanExpression::Const(false));
println!("{:?}", a.to_string());
println!("{:?}", a.to_bytes());
println!("{:?}", b.to_string());
println!("{:?}", c.to_string());
println!("{:?}", d.to_string());
println!("{:?}", e.to_string());
println!("{:?}", e.to_bytes());
println!("{:?}", t.to_string());
println!("{:?}", f.to_string());
}
#[test]
fn grounded() {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
.unwrap();
let adf = Adf::from_parser(&parser);
let xor = adf.ac[5].clone();
assert!(xor
.var_restrict(adf.vars[4], false)
.var_restrict(adf.vars[0], true)
.var_restrict(adf.vars[1], true)
.var_restrict(adf.vars[2], false)
.is_true());
let result = adf.grounded();
assert_eq!(
result,
vec![
Term::TOP,
Term::UND,
Term::UND,
Term::UND,
Term::BOT,
Term::TOP
]
);
assert_eq!(
format!("{}", adf.print_interpretation(&result)),
"T(a) u(b) u(c) u(d) F(e) T(f) \n"
);
let parser = AdfParser::default();
parser.parse()(
"s(a).s(b).s(c).s(d).s(e).ac(a,c(v)).ac(b,a).ac(c,b).ac(d,neg(c)).ac(e,and(a,d)).",
)
.unwrap();
let adf = Adf::from_parser(&parser);
let result = adf.grounded();
assert_eq!(
result,
vec![Term::TOP, Term::TOP, Term::TOP, Term::BOT, Term::BOT]
);
}
#[test]
fn complete() {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
.unwrap();
let adf = Adf::from_parser(&parser);
assert_eq!(
adf.complete().next(),
Some(vec![
Term::TOP,
Term::UND,
Term::UND,
Term::UND,
Term::BOT,
Term::TOP
])
);
assert_eq!(
adf.complete().collect::<Vec<_>>(),
[
[
Term::TOP,
Term::UND,
Term::UND,
Term::UND,
Term::BOT,
Term::TOP
],
[
Term::TOP,
Term::TOP,
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP
],
[
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
]
]
);
}
#[test]
fn complete2() {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).")
.unwrap();
let adf = Adf::from_parser(&parser);
assert_eq!(
adf.complete().collect::<Vec<_>>(),
[
[Term::TOP, Term::UND, Term::UND, Term::UND],
[Term::TOP, Term::TOP, Term::TOP, Term::BOT],
[Term::TOP, Term::BOT, Term::BOT, Term::TOP]
]
);
}
#[test]
fn stable() {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
.unwrap();
let adf = Adf::from_parser(&parser);
let mut stable = adf.stable();
assert_eq!(
stable.next(),
Some(vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
])
);
assert_eq!(stable.next(), None);
let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap();
let adf = Adf::from_parser(&parser);
let mut stable = adf.stable();
assert_eq!(stable.next(), Some(vec![Term::BOT, Term::TOP]));
assert_eq!(stable.next(), Some(vec![Term::TOP, Term::BOT]));
assert_eq!(stable.next(), None);
let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,b).ac(b,a).").unwrap();
let adf = Adf::from_parser(&parser);
assert_eq!(adf.stable().next(), Some(vec![Term::BOT, Term::BOT]));
for model in adf.stable() {
let printer = adf.print_dictionary();
assert_eq!(
format!("{}", adf.print_interpretation(&model)),
format!("{}", printer.print_interpretation(&model))
);
}
let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(a)).ac(b,a).").unwrap();
let adf = Adf::from_parser(&parser);
assert_eq!(adf.stable().next(), None);
}
}
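
A possible usage sketch for this new module, mirroring the tests above (the function name is illustrative; error handling is elided):

```rust
use adf_bdd::adfbiodivine::Adf;
use adf_bdd::parser::AdfParser;

// Illustrative driver: parse an instance and print all three semantics,
// all computed on biodivine BDDs.
fn solve(input: &str) {
    let parser = AdfParser::default();
    parser.parse()(input).expect("parsing failed");
    let adf = Adf::from_parser(&parser);
    let grounded = adf.grounded();
    print!("{}", adf.print_interpretation(&grounded));
    for model in adf.complete() {
        print!("{}", adf.print_interpretation(&model));
    }
    for model in adf.stable() {
        print!("{}", adf.print_interpretation(&model));
    }
}
```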

src/bin/adf_bdd.rs Normal file

@ -0,0 +1,193 @@
use std::{fs::File, path::PathBuf};
use adf_bdd::adf::Adf;
use adf_bdd::adfbiodivine::Adf as BdAdf;
use adf_bdd::parser::AdfParser;
use clap::{crate_authors, crate_description, crate_name, crate_version};
use structopt::StructOpt;
#[derive(StructOpt, Debug)]
#[structopt(name = crate_name!(), about = crate_description!(), author = crate_authors!(), version = crate_version!())]
struct App {
/// Input filename
#[structopt(parse(from_os_str))]
input: PathBuf,
/// Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not used
#[structopt(long = "rust_log", env)]
rust_log: Option<String>,
/// Choose the BDD implementation: either 'biodivine', 'naive', or 'hybrid'
#[structopt(long = "lib", default_value = "biodivine")]
implementation: String,
/// Sets log verbosity (multiple times means more verbose)
#[structopt(short, parse(from_occurrences), group = "verbosity")]
verbose: u8,
/// Sets log verbosity to only errors
#[structopt(short, group = "verbosity")]
quiet: bool,
/// Sorts variables in a lexicographic manner
#[structopt(long = "lx", group = "sorting")]
sort_lex: bool,
/// Sorts variables in an alphanumeric manner
#[structopt(long = "an", group = "sorting")]
sort_alphan: bool,
/// Compute the grounded model
#[structopt(long = "grd")]
grounded: bool,
/// Compute the stable models
#[structopt(long = "stm")]
stable: bool,
/// Compute the complete models
#[structopt(long = "com")]
complete: bool,
/// Import an adf-bdd state instead of an adf
#[structopt(long)]
import: bool,
/// Export the adf-bdd state after parsing and BDD instantiation to the given filename
#[structopt(long)]
export: Option<PathBuf>,
}
impl App {
fn run(&self) {
let filter_level = match self.verbose {
1 => log::LevelFilter::Info,
2 => log::LevelFilter::Debug,
3 => log::LevelFilter::Trace,
_ => {
if self.quiet {
log::LevelFilter::Error
} else if let Some(rust_log) = self.rust_log.clone() {
match rust_log.as_str() {
"error" => log::LevelFilter::Error,
"info" => log::LevelFilter::Info,
"debug" => log::LevelFilter::Debug,
"trace" => log::LevelFilter::Trace,
_ => log::LevelFilter::Warn,
}
} else {
log::LevelFilter::Warn
}
}
};
env_logger::builder().filter_level(filter_level).init();
log::info!("Version: {}", crate_version!());
let input = std::fs::read_to_string(self.input.clone()).expect("Error Reading File");
match self.implementation.as_str() {
"hybrid" => {
let parser = adf_bdd::parser::AdfParser::default();
parser.parse()(&input).unwrap();
log::info!("[Done] parsing");
if self.sort_lex {
parser.varsort_lexi();
}
if self.sort_alphan {
parser.varsort_alphanum();
}
let adf = BdAdf::from_parser(&parser);
if self.grounded {
let grounded = adf.grounded();
print!("{}", adf.print_interpretation(&grounded));
}
let mut naive_adf = adf.hybrid_step();
let printer = naive_adf.print_dictionary();
if self.complete {
for model in naive_adf.complete() {
print!("{}", printer.print_interpretation(&model));
}
}
if self.stable {
for model in naive_adf.stable() {
print!("{}", printer.print_interpretation(&model));
}
}
}
"biodivine" => {
let parser = adf_bdd::parser::AdfParser::default();
parser.parse()(&input).unwrap();
log::info!("[Done] parsing");
if self.sort_lex {
parser.varsort_lexi();
}
if self.sort_alphan {
parser.varsort_alphanum();
}
let adf = BdAdf::from_parser(&parser);
if self.grounded {
let grounded = adf.grounded();
print!("{}", adf.print_interpretation(&grounded));
}
if self.complete {
for model in adf.complete() {
print!("{}", adf.print_interpretation(&model));
}
}
if self.stable {
for model in adf.stable() {
print!("{}", adf.print_interpretation(&model));
}
}
}
_ => {
let mut adf = if self.import {
serde_json::from_str(&input).unwrap()
} else {
let parser = AdfParser::default();
parser.parse()(&input).unwrap();
log::info!("[Done] parsing");
if self.sort_lex {
parser.varsort_lexi();
}
if self.sort_alphan {
parser.varsort_alphanum();
}
Adf::from_parser(&parser)
};
if let Some(export) = &self.export {
if export.exists() {
log::error!(
"Cannot write JSON file <{}>, as it already exists",
export.to_string_lossy()
);
} else {
let export_file = match File::create(&export) {
Err(reason) => {
panic!("couldn't create {}: {}", export.to_string_lossy(), reason)
}
Ok(file) => file,
};
serde_json::to_writer(export_file, &adf).unwrap_or_else(|_| {
panic!("Writing JSON file {} failed", export.to_string_lossy())
});
}
}
if self.grounded {
let grounded = adf.grounded();
print!("{}", adf.print_interpretation(&grounded));
}
if self.complete {
let printer = adf.print_dictionary();
for model in adf.complete() {
print!("{}", printer.print_interpretation(&model));
}
}
if self.stable {
let printer = adf.print_dictionary();
for model in adf.stable() {
print!("{}", printer.print_interpretation(&model));
}
}
}
}
}
}
fn main() {
let app = App::from_args();
app.run();
}


@ -1,10 +1,9 @@
//! Representation of all needed ADF-based datatypes
use super::{Term, Var};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc}; use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc};
use super::{Term, Var};
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub(crate) struct VarContainer { pub(crate) struct VarContainer {
names: Rc<RefCell<Vec<String>>>, names: Rc<RefCell<Vec<String>>>,
@ -28,6 +27,13 @@ impl VarContainer {
VarContainer { names, mapping } VarContainer { names, mapping }
} }
pub fn copy(from: &Self) -> Self {
VarContainer {
names: from.names.clone(),
mapping: from.mapping.clone(),
}
}
pub fn variable(&self, name: &str) -> Option<Var> { pub fn variable(&self, name: &str) -> Option<Var> {
self.mapping.borrow().get(name).map(|val| Var(*val)) self.mapping.borrow().get(name).map(|val| Var(*val))
} }
@ -36,12 +42,36 @@ impl VarContainer {
self.names.borrow().get(var.value()).cloned() self.names.borrow().get(var.value()).cloned()
} }
#[allow(dead_code)]
pub fn names(&self) -> Rc<RefCell<Vec<String>>> { pub fn names(&self) -> Rc<RefCell<Vec<String>>> {
Rc::clone(&self.names) Rc::clone(&self.names)
} }
} }
/// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations
#[derive(Debug)]
pub struct PrintDictionary {
ordering: VarContainer,
}
impl PrintDictionary {
pub(crate) fn new(order: &VarContainer) -> Self {
Self {
ordering: VarContainer::copy(order),
}
}
/// creates a [PrintableInterpretation] for output purposes
pub fn print_interpretation<'a, 'b>(
&'a self,
interpretation: &'b [Term],
) -> PrintableInterpretation<'b>
where
'a: 'b,
{
PrintableInterpretation::new(interpretation, &self.ordering)
}
}
/// A struct to print a representation, it will be instantiated by [Adf][crate::adf::Adf] by calling the method [print_interpretation][`crate::adf::Adf::print_interpretation`].
#[derive(Debug)] #[derive(Debug)]
pub struct PrintableInterpretation<'a> { pub struct PrintableInterpretation<'a> {
interpretation: &'a [Term], interpretation: &'a [Term],
@ -179,29 +209,6 @@ impl ThreeValuedInterpretationsIterator {
if !ThreeValuedInterpretationsIterator::decrement_vec(current) { if !ThreeValuedInterpretationsIterator::decrement_vec(current) {
self.current = None; self.current = None;
} }
// if let Some((pos, val)) = current.iter().enumerate().find(|(idx, val)| **val > 0) {
// if pos > 0 && *val == 2 {
// for elem in &mut current[0..pos] {
// *elem = 2;
// }
// }
// current[pos] -= 1;
// if self.last_iteration {
// if current.iter().all(|val| *val == 0) {
// self.current = None;
// }
// }
// } else if !self.last_iteration {
// let len = current.len();
// if len <= 1 {
// self.current = None;
// } else {
// for elem in &mut current[0..len - 1] {
// *elem = 2;
// }
// }
// self.last_iteration = true;
//}
} }
} }
@ -230,7 +237,7 @@ impl Iterator for ThreeValuedInterpretationsIterator {
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
if self.started { if self.started {
if let Some(current) = &self.current { if self.current.is_some() {
self.decrement(); self.decrement();
} }
} else { } else {
@ -336,7 +343,7 @@ mod test {
assert_eq!(iter.next(), None); assert_eq!(iter.next(), None);
let testinterpretation = vec![Term(1), Term(3), Term(3), Term(7)]; let testinterpretation = vec![Term(1), Term(3), Term(3), Term(7)];
let mut iter: Vec<Vec<Term>> = let iter: Vec<Vec<Term>> =
ThreeValuedInterpretationsIterator::new(&testinterpretation).collect(); ThreeValuedInterpretationsIterator::new(&testinterpretation).collect();
assert_eq!( assert_eq!(
iter, iter,
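
The new `PrintDictionary` exists so that printing no longer needs to borrow the `Adf` while one of its model iterators holds a mutable borrow; a dictionary can be created up front and used inside the loop. A minimal sketch (the helper name is illustrative):

```rust
use adf_bdd::adf::Adf;

// Illustrative helper: `complete()` borrows `adf` mutably, so printing goes
// through a `PrintDictionary` created beforehand.
fn print_complete_models(adf: &mut Adf) {
    let printer = adf.print_dictionary();
    for model in adf.complete() {
        println!("{}", printer.print_interpretation(&model));
    }
}
```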


@ -4,6 +4,8 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::{fmt::Display, ops::Deref}; use std::{fmt::Display, ops::Deref};
use crate::adfbiodivine::AdfOperations;
/// Representation of a Term /// Representation of a Term
/// Each Term is represented in a number ([usize]) and relates to a /// Each Term is represented in a number ([usize]) and relates to a
/// Node in the decision diagram /// Node in the decision diagram
@ -23,6 +25,18 @@ impl From<usize> for Term {
} }
} }
impl From<&biodivine_lib_bdd::Bdd> for Term {
fn from(val: &biodivine_lib_bdd::Bdd) -> Self {
if val.is_true() {
Term::TOP
} else if val.is_false() {
Term::BOT
} else {
Term::UND
}
}
}
impl Display for Term { impl Display for Term {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Term({})", self.0) write!(f, "Term({})", self.0)
@ -34,6 +48,8 @@ impl Term {
pub const BOT: Term = Term(0); pub const BOT: Term = Term(0);
/// Represents the truth-value top, i.e. true /// Represents the truth-value top, i.e. true
pub const TOP: Term = Term(1); pub const TOP: Term = Term(1);
/// Represents the truth-value undecided, i.e. satisfiable but not valid
pub const UND: Term = Term(2);
/// Get the value of the Term, i.e. the corresponding [usize] /// Get the value of the Term, i.e. the corresponding [usize]
pub fn value(self) -> usize { pub fn value(self) -> usize {
@ -55,6 +71,11 @@ impl Term {
pub fn compare_inf(&self, other: &Self) -> bool { pub fn compare_inf(&self, other: &Self) -> bool {
self.is_truth_value() == other.is_truth_value() && self.is_true() == other.is_true() self.is_truth_value() == other.is_truth_value() && self.is_true() == other.is_true()
} }
/// Returns true, if the Term and the BDD have the same information-value
pub fn cmp_information(&self, other: &biodivine_lib_bdd::Bdd) -> bool {
self.is_truth_value() == other.is_truth_value() && self.is_true() == other.is_true()
}
} }
/// Representation of Variables /// Representation of Variables
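
A small sketch of the new `From<&Bdd>` conversion and the `cmp_information` helper (the variable set is illustrative and built as in the biodivine tests elsewhere in this commit):

```rust
use adf_bdd::datatypes::Term;
use biodivine_lib_bdd::BddVariableSetBuilder;

// Illustrative check of the new conversion: constants map to TOP/BOT,
// everything else to UND.
fn term_conversion_example() {
    let mut builder = BddVariableSetBuilder::new();
    builder.make_variables(&["a"]);
    let variables = builder.build();
    assert_eq!(Term::from(&variables.mk_true()), Term::TOP);
    assert_eq!(Term::from(&variables.mk_false()), Term::BOT);
    assert_eq!(Term::from(&variables.eval_expression_string("a")), Term::UND);
    // `cmp_information` relates a Term and a Bdd carrying the same information value
    assert!(Term::TOP.cmp_information(&variables.mk_true()));
}
```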


@ -8,6 +8,47 @@
//! Note that one advantage of this implementation is that only one oBDD is used for all acceptance conditions. This can be done because all of them have the identical signature (i.e. the set of all statements + top and bottom concepts).
//! Due to this uniform representation, reductions on subformulae which are shared by two or more statements only need to be computed once and are already cached in the data structure for further applications.
//! //!
//! # Usage
//! ```
//! USAGE:
//! adf_bdd [FLAGS] [OPTIONS] <input>
//!
//! FLAGS:
//! --com Compute the complete models
//! --grd Compute the grounded model
//! -h, --help Prints help information
//! --import Import an adf-bdd state instead of an adf
//! -q Sets log verbosity to only errors
//! --an Sorts variables in an alphanumeric manner
//! --lx Sorts variables in a lexicographic manner
//! --stm Compute the stable models
//! -V, --version Prints version information
//! -v Sets log verbosity (multiple times means more verbose)
//!
//! OPTIONS:
//! --export <export> Export the adf-bdd state after parsing and BDD instantiation to the given filename
//! --lib <implementation> Choose the BDD implementation: either 'biodivine', 'naive', or 'hybrid' [default: biodivine]
//! --rust_log <rust-log> Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not used [env: RUST_LOG=debug]
//!
//! ARGS:
//! <input> Input filename
//! ```
//!
//! Note that import and export only work if the naive library is chosen
//!
//! Right now the computed models carry no additional labels, so if you use --com --grd --stm the borders between the results are not explicitly marked.
//! They can be easily identified though:
//! - The computation is always in the same order
//! - grd
//! - com
//! - stm
//! - We know that there is always exactly one grounded model
//! - We know that there always exists at least one complete model (i.e. the grounded one)
//! - We know that a stable model does not need to exist
//! - We know that every stable model is a complete model too
//!
//! # Input-file format: //! # Input-file format:
//! Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement. //! Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
//! The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows: //! The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
@ -48,7 +89,10 @@
)] )]
pub mod adf; pub mod adf;
pub mod adfbiodivine;
pub mod datatypes; pub mod datatypes;
pub mod obdd; pub mod obdd;
pub mod parser; pub mod parser;
#[cfg(test)]
mod test;
//pub mod obdd2; //pub mod obdd2;


@ -1,132 +0,0 @@
pub mod adf;
pub mod datatypes;
pub mod obdd;
pub mod parser;
use std::{fs::File, path::PathBuf};
use adf::Adf;
use clap::{crate_authors, crate_description, crate_name, crate_version};
use parser::AdfParser;
use structopt::StructOpt;
#[derive(StructOpt, Debug)]
#[structopt(name = crate_name!(), about = crate_description!(), author = crate_authors!(), version = crate_version!())]
struct App {
/// Input filename
#[structopt(parse(from_os_str))]
input: PathBuf,
/// Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not use
#[structopt(long = "rust_log", env)]
rust_log: Option<String>,
/// Sets log verbosity (multiple times means more verbose)
#[structopt(short, parse(from_occurrences), group = "verbosity")]
verbose: u8,
/// Sets log verbosity to only errors
#[structopt(short, group = "verbosity")]
quiet: bool,
/// Sorts variables in an lexicographic manner
#[structopt(long = "lx", group = "sorting")]
sort_lex: bool,
/// Sorts variables in an alphanumeric manner
#[structopt(long = "an", group = "sorting")]
sort_alphan: bool,
/// Compute the grounded model
#[structopt(long = "grd")]
grounded: bool,
/// Compute the stable models
#[structopt(long = "stm")]
stable: bool,
/// Compute the complete models
#[structopt(long = "com")]
complete: bool,
/// Import an adf- bdd state instead of an adf
#[structopt(long)]
import: bool,
/// Export the adf-bdd state after parsing and BDD instantiation to the given filename
#[structopt(long)]
export: Option<PathBuf>,
}
impl App {
fn run(&self) {
let filter_level = match self.verbose {
1 => log::LevelFilter::Info,
2 => log::LevelFilter::Debug,
3 => log::LevelFilter::Trace,
_ => {
if self.quiet {
log::LevelFilter::Error
} else if let Some(rust_log) = self.rust_log.clone() {
match rust_log.as_str() {
"error" => log::LevelFilter::Error,
"info" => log::LevelFilter::Info,
"debug" => log::LevelFilter::Debug,
"trace" => log::LevelFilter::Trace,
_ => log::LevelFilter::Warn,
}
} else {
log::LevelFilter::Warn
}
}
};
env_logger::builder().filter_level(filter_level).init();
log::info!("Version: {}", crate_version!());
let input = std::fs::read_to_string(self.input.clone()).expect("Error Reading File");
let mut adf = if self.import {
serde_json::from_str(&input).unwrap()
} else {
let parser = AdfParser::default();
parser.parse()(&input).unwrap();
log::info!("[Done] parsing");
if self.sort_lex {
parser.varsort_lexi();
}
if self.sort_alphan {
parser.varsort_alphanum();
}
Adf::from_parser(&parser)
};
if let Some(export) = &self.export {
if export.exists() {
log::error!(
"Cannot write JSON file <{}>, as it already exists",
export.to_string_lossy()
);
} else {
let export_file = match File::create(&export) {
Err(reason) => {
panic!("couldn't create {}: {}", export.to_string_lossy(), reason)
}
Ok(file) => file,
};
serde_json::to_writer(export_file, &adf).unwrap_or_else(|_| {
panic!("Writing JSON file {} failed", export.to_string_lossy())
});
}
}
if self.grounded {
let grounded = adf.grounded();
print!("{}", adf.print_interpretation(&grounded));
}
if self.complete {
let complete = adf.complete(0);
for model in complete {
print!("{}", adf.print_interpretation(&model));
}
}
if self.stable {
let stable = adf.stable(0);
for model in stable {
print!("{}", adf.print_interpretation(&model));
}
}
}
}
fn main() {
let app = App::from_args();
app.run();
}


@ -115,7 +115,7 @@ impl Bdd {
self.node(minvar, bot_ite, top_ite) self.node(minvar, bot_ite, top_ite)
} }
} }
fn node(&mut self, var: Var, lo: Term, hi: Term) -> Term { pub fn node(&mut self, var: Var, lo: Term, hi: Term) -> Term {
if lo == hi { if lo == hi {
lo lo
} else { } else {
@ -238,7 +238,7 @@ mod test {
let v3 = bdd.variable(Var(2)); let v3 = bdd.variable(Var(2));
let a1 = bdd.and(v1, v2); let a1 = bdd.and(v1, v2);
let a2 = bdd.or(a1, v3); let _a2 = bdd.or(a1, v3);
assert_eq!(format!("{}", bdd), " \n0 BddNode: Var(18446744073709551614), lo: Term(0), hi: Term(0)\n1 BddNode: Var(18446744073709551615), lo: Term(1), hi: Term(1)\n2 BddNode: Var(0), lo: Term(0), hi: Term(1)\n3 BddNode: Var(1), lo: Term(0), hi: Term(1)\n4 BddNode: Var(2), lo: Term(0), hi: Term(1)\n5 BddNode: Var(0), lo: Term(0), hi: Term(3)\n6 BddNode: Var(1), lo: Term(4), hi: Term(1)\n7 BddNode: Var(0), lo: Term(4), hi: Term(6)\n"); assert_eq!(format!("{}", bdd), " \n0 BddNode: Var(18446744073709551614), lo: Term(0), hi: Term(0)\n1 BddNode: Var(18446744073709551615), lo: Term(1), hi: Term(1)\n2 BddNode: Var(0), lo: Term(0), hi: Term(1)\n3 BddNode: Var(1), lo: Term(0), hi: Term(1)\n4 BddNode: Var(2), lo: Term(0), hi: Term(1)\n5 BddNode: Var(0), lo: Term(0), hi: Term(3)\n6 BddNode: Var(1), lo: Term(4), hi: Term(1)\n7 BddNode: Var(0), lo: Term(4), hi: Term(6)\n");
} }


@ -35,6 +35,55 @@ pub enum Formula<'a> {
Iff(Box<Formula<'a>>, Box<Formula<'a>>), Iff(Box<Formula<'a>>, Box<Formula<'a>>),
} }
impl Formula<'_> {
pub(crate) fn to_boolean_expr(
&self,
) -> biodivine_lib_bdd::boolean_expression::BooleanExpression {
match self {
Formula::Top => biodivine_lib_bdd::boolean_expression::BooleanExpression::Const(true),
Formula::Bot => biodivine_lib_bdd::boolean_expression::BooleanExpression::Const(false),
Formula::Atom(name) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::Variable(name.to_string())
}
Formula::Not(subformula) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::Not(Box::new(
subformula.to_boolean_expr(),
))
}
Formula::And(sub_a, sub_b) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::And(
Box::new(sub_a.to_boolean_expr()),
Box::new(sub_b.to_boolean_expr()),
)
}
Formula::Or(sub_a, sub_b) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::Or(
Box::new(sub_a.to_boolean_expr()),
Box::new(sub_b.to_boolean_expr()),
)
}
Formula::Iff(sub_a, sub_b) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::Iff(
Box::new(sub_a.to_boolean_expr()),
Box::new(sub_b.to_boolean_expr()),
)
}
Formula::Imp(sub_a, sub_b) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::Imp(
Box::new(sub_a.to_boolean_expr()),
Box::new(sub_b.to_boolean_expr()),
)
}
Formula::Xor(sub_a, sub_b) => {
biodivine_lib_bdd::boolean_expression::BooleanExpression::Xor(
Box::new(sub_a.to_boolean_expr()),
Box::new(sub_b.to_boolean_expr()),
)
}
}
}
}
impl std::fmt::Debug for Formula<'_> { impl std::fmt::Debug for Formula<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self { match self {
@ -74,7 +123,7 @@ impl std::fmt::Debug for Formula<'_> {
/// Due to an internal representation with [RefCell][std::cell::RefCell] and [Rc][std::rc::Rc] the values can be /// Due to an internal representation with [RefCell][std::cell::RefCell] and [Rc][std::rc::Rc] the values can be
/// handed over to other structures without further storage needs. /// handed over to other structures without further storage needs.
/// ///
/// Note that the parser can be utilised by an [ADF][`crate::datatypes::adf::Adf`] to initialise it with minimal overhead. /// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead.
#[derive(Debug)] #[derive(Debug)]
pub struct AdfParser<'a> { pub struct AdfParser<'a> {
namelist: Rc<RefCell<Vec<String>>>, namelist: Rc<RefCell<Vec<String>>>,

src/test.rs Normal file

@ -0,0 +1,149 @@
use super::adf::Adf as NaiveAdf;
use super::adfbiodivine::*;
use super::datatypes::*;
use super::parser::*;
use test_log::test;
#[test]
fn adf_biodivine_cmp_1() {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
.unwrap();
let adf = Adf::from_parser(&parser);
let mut naive_adf = NaiveAdf::from_biodivine(&adf);
let result = adf.grounded();
assert_eq!(
result,
vec![
Term::TOP,
Term::UND,
Term::UND,
Term::UND,
Term::BOT,
Term::TOP
]
);
assert_eq!(
format!("{}", adf.print_interpretation(&result)),
"T(a) u(b) u(c) u(d) F(e) T(f) \n"
);
let naive_result = naive_adf.grounded();
assert_eq!(
format!("{}", adf.print_interpretation(&adf.grounded())),
format!("{}", naive_adf.print_interpretation(&naive_result))
);
let printer = naive_adf.print_dictionary();
let mut str1 = String::new();
let mut str2 = String::new();
let mut str3 = String::new();
for model in adf.stable() {
str1 = format!("{}{}", str1, adf.print_interpretation(&model));
}
for model in naive_adf.stable() {
str2 = format!("{}{}", str2, printer.print_interpretation(&model));
}
for model in adf.hybrid_step().stable() {
str3 = format!("{}{}", str3, printer.print_interpretation(&model));
}
assert_eq!(str1, str2);
assert_eq!(str1, str3);
let mut str1 = String::new();
let mut str2 = String::new();
let mut str3 = String::new();
for model in adf.complete() {
str1 = format!("{}{}", str1, adf.print_interpretation(&model));
}
for model in naive_adf.complete() {
str2 = format!("{}{}", str2, printer.print_interpretation(&model));
}
for model in adf.hybrid_step().complete() {
str3 = format!("{}{}", str3, printer.print_interpretation(&model));
}
assert_eq!(str1, str2);
assert_eq!(str1, str3);
let parser = AdfParser::default();
parser.parse()(
"s(a).s(b).s(c).s(d).s(e).ac(a,c(v)).ac(b,a).ac(c,b).ac(d,neg(c)).ac(e,and(a,d)).",
)
.unwrap();
let adf = Adf::from_parser(&parser);
let mut naive_adf = NaiveAdf::from_biodivine(&adf);
let result = adf.grounded();
let naive_result = naive_adf.grounded();
assert_eq!(
result,
vec![Term::TOP, Term::TOP, Term::TOP, Term::BOT, Term::BOT]
);
assert_eq!(
format!("{}", adf.print_interpretation(&result)),
format!("{}", naive_adf.print_interpretation(&naive_result))
);
}
#[test]
fn adf_biodivine_cmp_2() {
let parser = AdfParser::default();
parser.parse()(
"s(a).s(b).s(c).s(d).s(e).ac(a,c(v)).ac(b,a).ac(c,b).ac(d,neg(c)).ac(e,and(a,d)).",
)
.unwrap();
let adf = Adf::from_parser(&parser);
let mut naive_adf = NaiveAdf::from_biodivine(&adf);
let result = adf.grounded();
let naive_result = naive_adf.grounded();
assert_eq!(
result,
vec![Term::TOP, Term::TOP, Term::TOP, Term::BOT, Term::BOT]
);
assert_eq!(
format!("{}", adf.print_interpretation(&result)),
format!("{}", naive_adf.print_interpretation(&naive_result))
);
let printer = naive_adf.print_dictionary();
let mut str1 = String::new();
let mut str2 = String::new();
let mut str3 = String::new();
for model in adf.stable() {
str1 = format!("{}{}", str1, adf.print_interpretation(&model));
}
for model in naive_adf.stable() {
str2 = format!("{}{}", str2, printer.print_interpretation(&model));
}
for model in adf.hybrid_step().stable() {
str3 = format!("{}{}", str3, printer.print_interpretation(&model));
}
assert_eq!(str1, str2);
assert_eq!(str1, str3);
let mut str1 = String::new();
let mut str2 = String::new();
let mut str3 = String::new();
for model in adf.complete() {
str1 = format!("{}{}", str1, adf.print_interpretation(&model));
}
for model in naive_adf.complete() {
str2 = format!("{}{}", str2, printer.print_interpretation(&model));
}
for model in adf.hybrid_step().complete() {
str3 = format!("{}{}", str3, printer.print_interpretation(&model));
}
assert_eq!(str1, str2);
assert_eq!(str1, str3);
}
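(Illustrative note, not part of the commit.) Condensed from the comparisons above, the hybrid pipeline runs instantiation and the grounded interpretation through biodivine and then switches to the naive OBDD implementation for the remaining semantics. A sketch written as if it sat next to the tests above; the three-statement instance and the test name are invented, and it assumes `hybrid_step` returns the naive `Adf`, as the tests' use of the naive printer suggests:

```rust
use super::adfbiodivine::Adf;
use super::parser::AdfParser;

#[test]
fn hybrid_pipeline_sketch() {
    let parser = AdfParser::default();
    parser.parse()("s(a).s(b).s(c).ac(a,c(v)).ac(b,neg(a)).ac(c,and(a,b)).").unwrap();
    let adf = Adf::from_parser(&parser);
    // Grounded reasoning is done by biodivine; the result is handed to the
    // naive implementation, which then enumerates the stable models.
    let mut hybrid = adf.hybrid_step();
    let printer = hybrid.print_dictionary();
    for model in hybrid.stable() {
        print!("{}", printer.print_interpretation(&model));
    }
}
```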


@ -32,7 +32,149 @@ fn arguments() -> Result<(), Box<dyn std::error::Error>> {
}
#[test]
fn runs() -> Result<(), Box<dyn std::error::Error>> {
fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
let file = assert_fs::NamedTempFile::new("input_instance.adf")?;
file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1))).")?;
let wrong_file = assert_fs::NamedTempFile::new("wrong_format.adf")?;
wrong_file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1)))).")?;
let mut cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(wrong_file.path());
cmd.assert()
.failure()
.stderr(predicate::str::contains("code: Eof"));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("-vv")
.arg("--grd")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("-q")
.arg("--grd")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--lx")
.arg("-v")
.arg("--grd")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(10) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9)",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--stm")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.env_clear();
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--rust_log")
.arg("trace")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--rust_log")
.arg("warn")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
let tempdir = assert_fs::TempDir::new()?;
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--lib")
.arg("naive")
.arg("--export")
.arg(tempdir.path().with_file_name("test.json"));
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--lib")
.arg("naive")
.arg("--export")
.arg(tempdir.path().with_file_name("test.json"));
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(tempdir.path().with_file_name("test.json"))
.arg("--an")
.arg("--grd")
.arg("--import")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--com")
.arg("--rust_log")
.arg("warn")
.arg("--lib")
.arg("naive");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
Ok(())
}
#[test]
fn runs_biodivine() -> Result<(), Box<dyn std::error::Error>> {
let file = assert_fs::NamedTempFile::new("input_instance.adf")?;
file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1))).")?;
let wrong_file = assert_fs::NamedTempFile::new("wrong_format.adf")?;
@ -95,38 +237,6 @@ fn runs() -> Result<(), Box<dyn std::error::Error>> {
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
let tempdir = assert_fs::TempDir::new()?;
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--export")
.arg(tempdir.path().with_file_name("test.json"));
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--export")
.arg(tempdir.path().with_file_name("test.json"));
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(tempdir.path().with_file_name("test.json"))
.arg("--an")
.arg("--grd")
.arg("--import");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
@ -138,3 +248,107 @@ fn runs() -> Result<(), Box<dyn std::error::Error>> {
));
Ok(())
}
#[test]
fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
let file = assert_fs::NamedTempFile::new("input_instance.adf")?;
file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1))).")?;
let wrong_file = assert_fs::NamedTempFile::new("wrong_format.adf")?;
wrong_file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1)))).")?;
let mut cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(wrong_file.path());
cmd.assert()
.failure()
.stderr(predicate::str::contains("code: Eof"));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("-vv")
.arg("--grd")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("-q")
.arg("--grd")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--lx")
.arg("-v")
.arg("--grd")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(10) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9)",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--stm")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.env_clear();
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--rust_log")
.arg("trace")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--grd")
.arg("--rust_log")
.arg("warn")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
cmd = Command::cargo_bin("adf_bdd")?;
cmd.arg(file.path())
.arg("--an")
.arg("--com")
.arg("--rust_log")
.arg("warn")
.arg("--lib")
.arg("hybrid");
cmd.assert().success().stdout(predicate::str::contains(
"u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
));
Ok(())
}


@ -10,7 +10,7 @@ fn {name}() {{
let input = std::fs::read_to_string(resource).unwrap();
parser.parse()(&input).unwrap();
parser.varsort_alphanum();
let mut adf = Adf::from_parser(&parser);
let adf = Adf::from_parser(&parser);
let grounded = adf.grounded();
assert_eq!(
format!("{{}}", adf.print_interpretation(&grounded)),