Mirror of https://github.com/ellmau/adf-obdd.git, synced 2025-12-18 09:19:38 +01:00

milestone/nogoods (#74)

* Add NoGood and NoGoodStore (#65)
* Update Flake to nix 22.05
* Add nogood-algorithm to the ADF
* Add public api for the nogood-learner
* Add direnv to gitignore
* Avoid a Box, support custom heuristic functions
* Add ng option with heu to binary
* Introduce a new flag to handle big instances (modelcount vs adhoccount)
Note that adhoccount without modelcount will not produce correct modelcounts if memoization is used
* Add new heuristic
* Add crossbeam-channel to represent an output-stream of stable models
Uses a crossbeam-channel to fill a queue, which can be used safely
from outside the function.
This rework is done to also allow ad-hoc output of results in a
potentially multi-threaded setup.
* Add documentation on this new feature on the module page
* Fix broken links in rust-doc
* Update Readme for lib to reflect the new NoGood API
* Add metadata to bin/Cargo.toml, add features
* Add a benchmark feature to easily compile benchmark releases
* Fix facet count tests
* Add multithread-safe functionality for the dictionary/ordering
* Streamline a couple of API calls
* Expose more structs and methods to the public API
* Breaking some API (though nothing which is currently used in the binary)
* Simple version of gh pages
* Add more links and information to the landing page
* Fix badges in the app-doc
* Add two valued interpretation
Parameterised the stable-nogood algorithm to allow a variable
stability check function.
* Refactor nogood-algorithm name
* Update README.md and documentation (`docs` folder)
`README.md` on the `/` level now presents the same information
that is provided in `docs/index.md`
* Update main
- Update main functionality
- Update naming
* Fix cli-test
* Update Version to 0.3.0
Due to breaking API changes and reaching a milestone, the version is
incremented to 0.3.0 (beta)
* Update Documentation navigation (#81)
* flake.lock: Update
Flake lock file updates:
• Updated input 'flake-utils':
    'github:numtide/flake-utils/1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1' (2022-05-30)
  → 'github:numtide/flake-utils/7e2a3b3dfd9af950a856d66b0a7d01e3c18aa249' (2022-07-04)
• Updated input 'gitignoresrc':
    'github:hercules-ci/gitignore.nix/bff2832ec341cf30acb3a4d3e2e7f1f7b590116a' (2022-03-05)
  → 'github:hercules-ci/gitignore.nix/f2ea0f8ff1bce948ccb6b893d15d5ea3efaf1364' (2022-07-21)
• Updated input 'nixpkgs':
    'github:NixOS/nixpkgs/8b538fcb329a7bc3d153962f17c509ee49166973' (2022-06-15)
  → 'github:NixOS/nixpkgs/e43cf1748462c81202a32b26294e9f8eefcc3462' (2022-08-01)
• Updated input 'nixpkgs-unstable':
    'github:NixOS/nixpkgs/b1957596ff1c7aa8c55c4512b7ad1c9672502e8e' (2022-06-15)
  → 'github:NixOS/nixpkgs/7b9be38c7250b22d829ab6effdee90d5e40c6e5c' (2022-07-30)
• Updated input 'rust-overlay':
    'github:oxalica/rust-overlay/9eea93067eff400846c36f57b7499df9ef428ba0' (2022-06-17)
  → 'github:oxalica/rust-overlay/9055cb4f33f062c0dd33aa7e3c89140da8f70057' (2022-08-02)
* Add type alias for NoGood
Add a type alias `Interpretation` for NoGood to reflect the duality
where an Interpretation might become a NoGood (a sketch follows this list).
* Add documentation information about later revisions on VarContainer
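A minimal sketch of the alias (the module path is assumed from the library layout; illustrative, not necessarily the exact source):

```rust
use adf_bdd::nogoods::NoGood; // module path assumed from the crate layout

/// An interpretation carries the same data as a nogood (a partial
/// assignment of truth values to statements), so a transparent alias
/// captures the duality that an interpretation may become a nogood.
pub type Interpretation = NoGood;
```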

Co-authored-by: Maximilian Marx <mmarx@wh2.tu-dresden.de>
Authored by Stefan Ellmauthaler on 2022-08-02 14:02:00 +02:00, committed by GitHub
parent 45700d7224
commit d7e71e5da7
24 changed files with 2153 additions and 354 deletions

.gitignore (vendored): 6 changed lines

@ -21,3 +21,9 @@ tramp
*_flymake*
/tests/out/
# Ignore direnv data
/.direnv/
# ignore perfdata
perf.data

Cargo.lock (generated): 127 changed lines

@ -3,10 +3,28 @@
version = 3
[[package]]
name = "adf_bdd"
version = "0.2.4"
name = "adf-bdd-solver"
version = "0.3.0"
dependencies = [
"biodivine-lib-bdd 0.4.0",
"adf_bdd",
"assert_cmd",
"assert_fs",
"clap",
"crossbeam-channel",
"env_logger 0.9.0",
"log",
"predicates",
"serde",
"serde_json",
"strum",
]
[[package]]
name = "adf_bdd"
version = "0.3.0"
dependencies = [
"biodivine-lib-bdd",
"crossbeam-channel",
"derivative",
"env_logger 0.9.0",
"lexical-sort",
@ -14,41 +32,13 @@ dependencies = [
"nom",
"quickcheck",
"quickcheck_macros",
"roaring",
"serde",
"serde_json",
"strum",
"test-log",
]
[[package]]
name = "adf_bdd"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e781519ea5434514f014476c02ccee777b28e600ad58fadca195715acb194c69"
dependencies = [
"biodivine-lib-bdd 0.3.0",
"derivative",
"lexical-sort",
"log",
"nom",
"serde",
"serde_json",
]
[[package]]
name = "adf_bdd-solver"
version = "0.2.4"
dependencies = [
"adf_bdd 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
"assert_cmd",
"assert_fs",
"clap",
"env_logger 0.9.0",
"log",
"predicates",
"serde",
"serde_json",
]
[[package]]
name = "aho-corasick"
version = "0.7.18"
@ -109,16 +99,6 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "biodivine-lib-bdd"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcfb460c1131729ec8d797ce0be49ec102506e764ce62f20a40056516851c5"
dependencies = [
"fxhash",
"rand 0.7.3",
]
[[package]]
name = "biodivine-lib-bdd"
version = "0.4.0"
@ -147,6 +127,12 @@ dependencies = [
"regex-automata",
]
[[package]]
name = "bytemuck"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc"
[[package]]
name = "byteorder"
version = "1.4.3"
@ -198,6 +184,16 @@ dependencies = [
"os_str_bytes",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.7"
@ -718,6 +714,29 @@ dependencies = [
"winapi",
]
[[package]]
name = "retain_mut"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4389f1d5789befaf6029ebd9f7dac4af7f7e3d61b69d4f30e2ac02b57e7712b0"
[[package]]
name = "roaring"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd539cab4e32019956fe7e0cf160bb6d4802f4be2b52c4253d76d3bb0f85a5f7"
dependencies = [
"bytemuck",
"byteorder",
"retain_mut",
]
[[package]]
name = "rustversion"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0a5f7c728f5d284929a1cccb5bc19884422bfe6ef4d6c409da2c41838983fcf"
[[package]]
name = "ryu"
version = "1.0.9"
@ -770,6 +789,28 @@ version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "strum"
version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f"
dependencies = [
"strum_macros",
]
[[package]]
name = "strum_macros"
version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef"
dependencies = [
"heck",
"proc-macro2",
"quote",
"rustversion",
"syn",
]
[[package]]
name = "syn"
version = "1.0.92"

README.md: 114 changed lines

@ -8,7 +8,16 @@
![Crates.io](https://img.shields.io/crates/l/adf_bdd)
[![GitHub Discussions](https://img.shields.io/github/discussions/ellmau/adf-obdd)](https://github.com/ellmau/adf-obdd/discussions) ![rust-edition](https://img.shields.io/badge/Rust--edition-2021-blue?logo=rust)
# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF-BDD)
# Abstract Dialectical Frameworks solved by (ordered) Binary Decision Diagrams; developed in Dresden (ADF-oBDD project)
This project is currently split into two parts:
- a [binary (adf-bdd)](bin), which allows one to easily answer semantics questions on abstract dialectical frameworks
- a [library (adf_bdd)](lib), which contains all the necessary algorithms and an open API which computes the answers to the semantics questions
Latest documentation of the API can be found [here](https://docs.rs/adf_bdd/latest/adf_bdd/).
The current version of the binary can be downloaded [here](https://github.com/ellmau/adf-obdd/releases).
Do not hesitate to report bugs or ask about features in the [issues-section](https://github.com/ellmau/adf-obdd/issues) or have a conversation about anything related to the project in the [discussion space](https://github.com/ellmau/adf-obdd/discussions).
## Abstract Dialectical Frameworks
@ -16,59 +25,6 @@ An abstract dialectical framework (ADF) consists of abstract statements. Each st
## Ordered Binary Decision Diagram
An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheaply.
## Usage of the binary
```
USAGE:
adf_bdd [OPTIONS] <INPUT>
ARGS:
<INPUT> Input filename
OPTIONS:
--an Sorts variables in an alphanumeric manner
--com Compute the complete models
--counter <COUNTER> Set if the (counter-)models shall be computed and printed,
possible values are 'nai' and 'mem' for naive and memoization
respectively (only works in hybrid and naive mode)
--export <EXPORT> Export the adf-bdd state after parsing and BDD instantiation to
the given filename
--grd Compute the grounded model
-h, --help Print help information
--import Import an adf-bdd state instead of an adf
--lib <IMPLEMENTATION> choose the bdd implementation of either 'biodivine', 'naive', or
hybrid [default: hybrid]
--lx Sorts variables in a lexicographic manner
-q Sets log verbosity to only errors
--rust_log <RUST_LOG> Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and
-q are not used [env: RUST_LOG=debug]
--stm Compute the stable models
--stmca Compute the stable models with the help of modelcounting using
heuristics a
--stmcb Compute the stable models with the help of modelcounting using
heuristics b
--stmpre Compute the stable models with a pre-filter (only hybrid lib-mode)
--stmrew Compute the stable models with a single-formula rewriting (only
hybrid lib-mode)
--stmrew2 Compute the stable models with a single-formula rewriting on
internal representation (only hybrid lib-mode)
-v Sets log verbosity (multiple times means more verbose)
-V, --version Print version information
```
Note that import and export only work if the naive library is chosen.
Right now there is no additional information attached to the computed models, so if you use `--com --grd --stm` as the command line arguments, the borders between the results are not obviously communicated.
They can be easily identified though:
- The computation is always in the same order
- grd
- com
- stm
- We know that there is always exactly one grounded model
- We know that there always exists at least one complete model (i.e. the grounded one)
- We know that there does not need to exist a stable model
- We know that every stable model is a complete model too
## Input-file format:
Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
@ -82,56 +38,14 @@ The binary predicate ac relates each statement to one propositional formula in p
# Features
`adhoccounting` will cache the modelcount on-the-fly during the construction of the BDD
- `adhoccounting` will cache the modelcount on-the-fly during the construction of the BDD
- `adhoccountmodels` additionally allows computing the models ad-hoc. Note that the memoization approach for modelcounting does not work correctly if `adhoccounting` is set and `adhoccountmodels` is not.
# Development notes
Additional information for contribution, testing, and development in general can be found here.
## Contributing to the project
You want to help and contribute to the project? That is great. Please see the [contributing guidelines](https://github.com/ellmau/adf-obdd/blob/main/.github/CONTRIBUTING.md) first.
## Building the binary:
To build the binary, you need to run
```bash
$> cargo build --workspace --release
```
To build the binary with debug-symbols, run
```bash
$> cargo build --workspace
```
## Testing with the `res` folder:
To run all the tests placed in the submodule you need to run
```bash
$> git submodule init
```
the first time.
Afterwards you need to update the content of the submodule to be on the currently used revision by
```bash
$> git submodule update
```
The tests can be started by using the test-framework of cargo, i.e.
```bash
$> cargo test
```
Note that some of the instances are quite big and it might take some time to finish all the tests.
If you do not initialise the submodule, tests will "only" run on the other unit-tests and (possibly forthcoming) other integration tests.
Due to the way the test modules are generated, you need to call
```bash
$> cargo clean
```
if you change some of your test-cases.
To remove the tests just type
```bash
$> git submodule deinit res/adf-instances
```
or
```bash
$> git submodule deinit --all
```
# Acknowledgements
This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in projects number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
@ -139,7 +53,7 @@ the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Educ
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
# Affiliation
This work has been party developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden
Hosting content here does not establish any formal or legal relation to TU Dresden.

bin/Cargo.toml

@ -1,10 +1,12 @@
[package]
name = "adf_bdd-solver"
version = "0.2.4"
name = "adf-bdd-solver"
version = "0.3.0"
authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
edition = "2021"
homepage = "https://ellmau.github.io/adf-obdd"
repository = "https://github.com/ellmau/adf-obdd"
license = "MIT"
exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml"]
exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml", "tarpaulin-report.*", "*~"]
description = "Solver for ADFs grounded, complete, and stable semantics by utilising OBDDs - ordered binary decision diagrams"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@ -14,12 +16,14 @@ name = "adf-bdd"
path = "src/main.rs"
[dependencies]
adf_bdd = { version="0.2.4", default-features = false }
adf_bdd = { version="0.3.0", path="../lib", default-features = false }
clap = {version = "3.2.12", features = [ "derive", "cargo", "env" ]}
log = { version = "0.4", features = [ "max_level_trace", "release_max_level_info" ] }
serde = { version = "1.0", features = ["derive","rc"] }
serde_json = "1.0"
env_logger = "0.9"
strum = { version = "0.24" }
crossbeam-channel = "0.5"
[dev-dependencies]
assert_cmd = "2.0"
@ -32,3 +36,5 @@ adhoccounting = ["adf_bdd/adhoccounting"] # count models ad-hoc - disable if
importexport = ["adf_bdd/importexport"]
variablelist = [ "HashSet", "adf_bdd/variablelist" ]
HashSet = ["adf_bdd/HashSet"]
adhoccountmodels = ["adf_bdd/adhoccountmodels"]
benchmark = ["adf_bdd/benchmark"]

bin/README.md

@ -11,7 +11,7 @@ An ordered binary decision diagram is a normalised representation of binary func
## Usage
```
USAGE:
adf_bdd [OPTIONS] <INPUT>
adf-bdd [OPTIONS] <INPUT>
ARGS:
<INPUT> Input filename
@ -26,8 +26,11 @@ OPTIONS:
the given filename
--grd Compute the grounded model
-h, --help Print help information
--heu <HEU> Choose which heuristics shall be used by the nogood-learning
approach [possible values: Simple, MinModMinPathsMaxVarImp,
MinModMaxVarImpMinPaths]
--import Import an adf-bdd state instead of an adf
--lib <IMPLEMENTATION> choose the bdd implementation of either 'biodivine', 'naive', or
--lib <IMPLEMENTATION> Choose the bdd implementation of either 'biodivine', 'naive', or
hybrid [default: hybrid]
--lx Sorts variables in a lexicographic manner
-q Sets log verbosity to only errors
@ -38,11 +41,14 @@ OPTIONS:
heuristics a
--stmcb Compute the stable models with the help of modelcounting using
heuristics b
--stmng Compute the stable models with the nogood-learning based approach
--stmpre Compute the stable models with a pre-filter (only hybrid lib-mode)
--stmrew Compute the stable models with a single-formula rewriting (only
hybrid lib-mode)
--stmrew2 Compute the stable models with a single-formula rewriting on
internal representation (only hybrid lib-mode)
--twoval Compute the two valued models with the nogood-learning based
approach
-v Sets log verbosity (multiple times means more verbose)
-V, --version Print version information
```
@ -121,7 +127,7 @@ the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Educ
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
# Affiliation
This work has been party developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden
Hosting content here does not establish any formal or legal relation to TU Dresden.

bin/src/main.rs

@ -17,7 +17,7 @@ In addition some further features, like counter-model counting is not supported
# Usage
```plain
USAGE:
adf_bdd [OPTIONS] <INPUT>
adf-bdd [OPTIONS] <INPUT>
ARGS:
<INPUT> Input filename
@ -32,20 +32,29 @@ OPTIONS:
the given filename
--grd Compute the grounded model
-h, --help Print help information
--heu <HEU> Choose which heuristics shall be used by the nogood-learning
approach [possible values: Simple, MinModMinPathsMaxVarImp,
MinModMaxVarImpMinPaths]
--import Import an adf-bdd state instead of an adf
--lib <IMPLEMENTATION> choose the bdd implementation of either 'biodivine', 'naive', or
--lib <IMPLEMENTATION> Choose the bdd implementation of either 'biodivine', 'naive', or
hybrid [default: hybrid]
--lx Sorts variables in a lexicographic manner
-q Sets log verbosity to only errors
--rust_log <RUST_LOG> Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and
-q are not used [env: RUST_LOG=debug]
--stm Compute the stable models
--stmc Compute the stable models with the help of modelcounting
--stmca Compute the stable models with the help of modelcounting using
heuristics a
--stmcb Compute the stable models with the help of modelcounting using
heuristics b
--stmng Compute the stable models with the nogood-learning based approach
--stmpre Compute the stable models with a pre-filter (only hybrid lib-mode)
--stmrew Compute the stable models with a single-formula rewriting (only
hybrid lib-mode)
--stmrew2 Compute the stable models with a single-formula rewriting on
internal representation (only hybrid lib-mode)
--twoval Compute the two valued models with the nogood-learning based
approach
-v Sets log verbosity (multiple times means more verbose)
-V, --version Print version information
```
@ -74,6 +83,8 @@ use adf_bdd::adfbiodivine::Adf as BdAdf;
use adf_bdd::parser::AdfParser;
use clap::Parser;
use crossbeam_channel::unbounded;
use strum::VariantNames;
#[derive(Parser, Debug)]
#[clap(author, version, about)]
@ -84,7 +95,7 @@ struct App {
/// Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not used
#[clap(long = "rust_log", env)]
rust_log: Option<String>,
/// choose the bdd implementation of either 'biodivine', 'naive', or hybrid
/// Choose the bdd implementation of either 'biodivine', 'naive', or hybrid
#[clap(long = "lib", default_value = "hybrid")]
implementation: String,
/// Sets log verbosity (multiple times means more verbose)
@ -120,6 +131,15 @@ struct App {
/// Compute the stable models with a single-formula rewriting on internal representation(only hybrid lib-mode)
#[clap(long = "stmrew2")]
stable_rew2: bool,
/// Compute the stable models with the nogood-learning based approach
#[clap(long = "stmng")]
stable_ng: bool,
/// Choose which heuristics shall be used by the nogood-learning approach
#[clap(long, possible_values = adf_bdd::adf::heuristics::Heuristic::VARIANTS.iter().filter(|&v| v != &"Custom"))]
heu: Option<adf_bdd::adf::heuristics::Heuristic<'static>>,
/// Compute the two valued models with the nogood-learning based approach
#[clap(long = "twoval")]
two_val: bool,
/// Compute the complete models
#[clap(long = "com")]
complete: bool,
@ -215,6 +235,14 @@ impl App {
}
}
if self.two_val {
let (sender, receiver) = unbounded();
naive_adf.two_val_nogood_channel(self.heu.unwrap_or_default(), sender);
for model in receiver.into_iter() {
print!("{}", printer.print_interpretation(&model));
}
}
if self.stable {
for model in naive_adf.stable() {
print!("{}", printer.print_interpretation(&model));
@ -244,6 +272,12 @@ impl App {
print!("{}", printer.print_interpretation(&model));
}
}
if self.stable_ng {
for model in naive_adf.stable_nogood(self.heu.unwrap_or_default()) {
print!("{}", printer.print_interpretation(&model));
}
}
}
"biodivine" => {
if self.counter.is_some() {


@ -27,7 +27,7 @@ fn arguments() -> Result<(), Box<dyn std::error::Error>> {
cmd.arg("--version");
cmd.assert()
.success()
.stdout(predicate::str::contains("adf_bdd-solver "));
.stdout(predicate::str::contains("adf-bdd-solver "));
Ok(())
}

docs/_config.yml (new file): 3 lines

@ -0,0 +1,3 @@
theme: jekyll-theme-architect
show_downloads: false
markdown: kramdown

docs/adf-bdd.md (new file): 139 lines

@ -0,0 +1,139 @@
[![Crates.io](https://img.shields.io/crates/v/adf_bdd)](https://crates.io/crates/adf_bdd)
[![docs.rs](https://img.shields.io/docsrs/adf_bdd?label=docs.rs)](https://docs.rs/adf_bdd/latest/adf_bdd/)
![GitHub Workflow Status](https://img.shields.io/github/workflow/status/ellmau/adf-obdd/Code%20coverage%20with%20tarpaulin)
[![Coveralls](https://img.shields.io/coveralls/github/ellmau/adf-obdd)](https://coveralls.io/github/ellmau/adf-obdd)
![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/ellmau/adf-obdd?include_prereleases)
![GitHub (Pre-)Release Date](https://img.shields.io/github/release-date-pre/ellmau/adf-obdd?label=release%20from) ![GitHub top language](https://img.shields.io/github/languages/top/ellmau/adf-obdd)
[![GitHub all releases](https://img.shields.io/github/downloads/ellmau/adf-obdd/total)](https://github.com/ellmau/adf-obdd/releases)
![Crates.io](https://img.shields.io/crates/l/adf_bdd)
[![GitHub Discussions](https://img.shields.io/github/discussions/ellmau/adf-obdd)](https://github.com/ellmau/adf-obdd/discussions) ![rust-edition](https://img.shields.io/badge/Rust--edition-2021-blue?logo=rust)
| [Home](index.md) | [Binary](adf-bdd.md) | [Library](adf_bdd.md)| [Repository](https://github.com/ellmau/adf-obdd) |
|--- | --- | --- | --- |
# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF-BDD)
This is the readme for the executable solver.
## Usage
```
USAGE:
adf-bdd [OPTIONS] <INPUT>
ARGS:
<INPUT> Input filename
OPTIONS:
--an Sorts variables in an alphanumeric manner
--com Compute the complete models
--counter <COUNTER> Set if the (counter-)models shall be computed and printed,
possible values are 'nai' and 'mem' for naive and memoization
respectively (only works in hybrid and naive mode)
--export <EXPORT> Export the adf-bdd state after parsing and BDD instantiation to
the given filename
--grd Compute the grounded model
-h, --help Print help information
--heu <HEU> Choose which heuristics shall be used by the nogood-learning
approach [possible values: Simple, MinModMinPathsMaxVarImp,
MinModMaxVarImpMinPaths]
--import Import an adf-bdd state instead of an adf
--lib <IMPLEMENTATION> Choose the bdd implementation of either 'biodivine', 'naive', or
hybrid [default: hybrid]
--lx Sorts variables in a lexicographic manner
-q Sets log verbosity to only errors
--rust_log <RUST_LOG> Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and
-q are not used [env: RUST_LOG=debug]
--stm Compute the stable models
--stmca Compute the stable models with the help of modelcounting using
heuristics a
--stmcb Compute the stable models with the help of modelcounting using
heuristics b
--stmng Compute the stable models with the nogood-learning based approach
--stmpre Compute the stable models with a pre-filter (only hybrid lib-mode)
--stmrew Compute the stable models with a single-formula rewriting (only
hybrid lib-mode)
--stmrew2 Compute the stable models with a single-formula rewriting on
internal representation (only hybrid lib-mode)
--twoval Compute the two valued models with the nogood-learning based
approach
-v Sets log verbosity (multiple times means more verbose)
-V, --version Print version information
```
Note that import and export only work if the naive library is chosen.
Right now there is no additional information attached to the computed models, so if you use `--com --grd --stm` the borders between the results are not obviously communicated.
They can be easily identified though:
- The computation is always in the same order
- grd
- com
- stm
- We know that there is always exactly one grounded model
- We know that there always exists at least one complete model (i.e. the grounded one)
- We know that there does not need to exist a stable model
- We know that every stable model is a complete model too
## Input-file format:
Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows (a small example input follows the list):
- and(x,y): conjunction
- or(x,y): disjunction
- iff(x,y): if and only if
- xor(x,y): exclusive or
- neg(x): classical negation
- c(v): constant symbol "verum" - tautology/top
- c(f): constant symbol "falsum" - inconsistency/bot
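For reference, the small example input from the library documentation uses exactly these predicates:
```plain
s(a).
s(b).
s(c).
s(d).
ac(a,c(v)).
ac(b,or(a,b)).
ac(c,neg(b)).
ac(d,d).
```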
# Development notes
To build the binary, you need to run
```bash
$> cargo build --workspace --release
```
To build the binary with debug-symbols, run
```bash
$> cargo build --workspace
```
To run all the tests placed in the submodule you need to run
```bash
$> git submodule init
```
the first time.
Afterwards you need to update the content of the submodule to be on the currently used revision by
```bash
$> git submodule update
```
The tests can be started by using the test-framework of cargo, i.e.
```bash
$> cargo test
```
Note that some of the instances are quite big and it might take some time to finish all the tests.
If you do not initialise the submodule, tests will "only" run on the other unit-tests and (possibly forthcoming) other integration tests.
Due to the way the test modules are generated, you need to call
```bash
$> cargo clean
```
if you change some of your test-cases.
To remove the tests just type
```bash
$> git submodule deinit res/adf-instances
```
or
```bash
$> git submodule deinit --all
```
# Acknowledgements
This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in projects number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
# Affiliation
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden.

docs/adf_bdd.md (new file): 167 lines

@ -0,0 +1,167 @@
[![Crates.io](https://img.shields.io/crates/v/adf_bdd)](https://crates.io/crates/adf_bdd)
[![docs.rs](https://img.shields.io/docsrs/adf_bdd?label=docs.rs)](https://docs.rs/adf_bdd/latest/adf_bdd/)
![GitHub Workflow Status](https://img.shields.io/github/workflow/status/ellmau/adf-obdd/Code%20coverage%20with%20tarpaulin)
[![Coveralls](https://img.shields.io/coveralls/github/ellmau/adf-obdd)](https://coveralls.io/github/ellmau/adf-obdd)
![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/ellmau/adf-obdd?include_prereleases)
![GitHub (Pre-)Release Date](https://img.shields.io/github/release-date-pre/ellmau/adf-obdd?label=release%20from) ![GitHub top language](https://img.shields.io/github/languages/top/ellmau/adf-obdd)
[![GitHub all releases](https://img.shields.io/github/downloads/ellmau/adf-obdd/total)](https://github.com/ellmau/adf-obdd/releases)
![Crates.io](https://img.shields.io/crates/l/adf_bdd)
[![GitHub Discussions](https://img.shields.io/github/discussions/ellmau/adf-obdd)](https://github.com/ellmau/adf-obdd/discussions) ![rust-edition](https://img.shields.io/badge/Rust--edition-2021-blue?logo=rust)
| [Home](index.md) | [Binary](adf-bdd.md) | [Library](adf_bdd.md)| [Repository](https://github.com/ellmau/adf-obdd) |
|--- | --- | --- | --- |
# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF_BDD)
This library contains an efficient representation of Abstract Dialectical Frameworks (ADF) by utilising an implementation of Ordered Binary Decision Diagrams (OBDD).
## Noteworthy relations between ADF semantics
The computed models relate as follows (see the sketch after this list):
* The computation is always in the same order
* grd
* com
* stm
* We know that there is always exactly one grounded model
* We know that there always exists at least one complete model (i.e. the grounded one)
* We know that there does not need to exist a stable model
* We know that every stable model is a complete model too
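As a quick illustration of the last two relations, here is a minimal sketch against the crate API used in the usage examples below (the toy input and the assertion are illustrative additions, not part of the crate's documentation):

```rust
use adf_bdd::adf::Adf;
use adf_bdd::parser::AdfParser;

fn main() {
    // toy ADF: a is unconditionally accepted, b's condition refutes itself
    let input = "s(a).s(b).ac(a,c(v)).ac(b,neg(b)).";
    let parser = AdfParser::default();
    parser.parse()(input).expect("parsing should succeed");
    let mut adf = Adf::from_parser(&parser);
    // collect the complete models first, then check stable ⊆ complete
    let complete: Vec<_> = adf.complete().collect();
    for stable in adf.stable() {
        assert!(complete.contains(&stable));
    }
}
```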
## Ordered Binary Decision Diagram
An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheaply.
Note that one advantage of this implementation is that only one oBDD is used for all acceptance conditions. This can be done because all of them have the identical signature (i.e. the set of all statements + top and bottom concepts). Due to this uniform representation, reductions on subformulae which are shared by two or more statements only need to be computed once and are already cached in the data structure for further applications.
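To make the sharing concrete, here is a toy hash-consed node table (illustrative only, with assumed names; the crate's actual `Bdd` in the `obdd` module differs in detail):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Node {
    var: usize,
    lo: usize, // successor if `var` is false
    hi: usize, // successor if `var` is true
}

/// One node table shared by all acceptance conditions: structurally
/// identical sub-BDDs are stored, and hence reduced, exactly once.
struct SharedTable {
    nodes: Vec<Node>,
    unique: HashMap<Node, usize>,
}

impl SharedTable {
    fn new() -> Self {
        // indices 0 and 1 are reserved for the two terminal nodes
        SharedTable {
            nodes: vec![
                Node { var: usize::MAX, lo: 0, hi: 0 },
                Node { var: usize::MAX, lo: 1, hi: 1 },
            ],
            unique: HashMap::new(),
        }
    }

    /// Returns the index of a node, reusing an existing one if possible.
    fn mk(&mut self, var: usize, lo: usize, hi: usize) -> usize {
        if lo == hi {
            return lo; // redundant-test elimination
        }
        let node = Node { var, lo, hi };
        if let Some(&idx) = self.unique.get(&node) {
            return idx; // already built for another condition: reused
        }
        self.nodes.push(node);
        let idx = self.nodes.len() - 1;
        self.unique.insert(node, idx);
        idx
    }
}

fn main() {
    let mut table = SharedTable::new();
    // two acceptance conditions requesting the same sub-BDD share one node
    let first = table.mk(0, 0, 1);
    let second = table.mk(0, 0, 1);
    assert_eq!(first, second);
}
```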
The algorithm used to create a BDD from a given formula does not perform well on bigger formulae; therefore it is possible to use a state-of-the-art library to instantiate the BDD (https://github.com/sybila/biodivine-lib-bdd). It is possible to either stay with the biodivine library or switch back to the variant implemented by adf-bdd. The variant implemented in this library offers reuse of already done reductions and memoisation techniques, which are not offered by biodivine. In addition, some further features, like counter-model counting, are not supported by biodivine.
Note that import and export only work if the naive library is chosen.
## Input-file format:
Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement. The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
```plain
and(x,y): conjunction
or(x,y): disjunction
iff(x,y): if and only if
xor(x,y): exclusive or
neg(x): classical negation
c(v): constant symbol “verum” - tautology/top
c(f): constant symbol “falsum” - inconsistency/bot
```
### Example input file:
```plain
s(a).
s(b).
s(c).
s(d).
ac(a,c(v)).
ac(b,or(a,b)).
ac(c,neg(b)).
ac(d,d).
```
## Usage examples
First parse a given ADF and sort the statements, if needed.
```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
// use the above example as input
let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
let parser = AdfParser::default();
match parser.parse()(&input) {
Ok(_) => log::info!("[Done] parsing"),
Err(e) => {
log::error!(
"Error during parsing:\n{} \n\n cannot continue, panic!",
e
);
panic!("Parsing failed, see log for further details")
}
}
// sort lexicographic
parser.varsort_lexi();
```
use the naive/in-crate implementation
```rust
// create Adf
let mut adf = Adf::from_parser(&parser);
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
print!("{}", printer.print_interpretation(&model));
}
```
use the biodivine implementation
```rust
// create Adf
let adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser);
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
print!("{}", printer.print_interpretation(&model));
}
```
use the hybrid approach implementation
```rust
// create biodivine Adf
let badf = adf_bdd::adfbiodivine::Adf::from_parser(&parser);
// instantiate the internally used adf after the reduction done by biodivine
let mut adf = badf.hybrid_step();
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
print!("{}", printer.print_interpretation(&model));
}
```
use the new `NoGood`-based algorithm and utilise the new interface with channels:
```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::datatypes::{Term, adf::VarContainer};
// create a channel
let (s, r) = crossbeam_channel::unbounded();
let variables = VarContainer::default();
let variables_worker = variables.clone();
// spawn a solver thread
let solving = std::thread::spawn(move || {
// use the above example as input
let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
let parser = AdfParser::with_var_container(variables_worker);
parser.parse()(&input).expect("parsing worked well");
// use hybrid approach
let mut adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser).hybrid_step();
// compute stable with the simple heuristic
adf.stable_nogood_channel(Heuristic::Simple, s);
});
let printer = variables.print_dictionary();
// print results as they are computed
while let Ok(result) = r.recv() {
print!("stable model: {:?} \n", result);
// use dictionary
print!("stable model with variable names: {}", printer.print_interpretation(&result));
}
// waiting for the other thread to close
solving.join().unwrap();
```
# Acknowledgements
This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in projects number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
# Affiliation
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden.

docs/index.md (new file): 62 lines

@ -0,0 +1,62 @@
[![Crates.io](https://img.shields.io/crates/v/adf_bdd)](https://crates.io/crates/adf_bdd)
[![docs.rs](https://img.shields.io/docsrs/adf_bdd?label=docs.rs)](https://docs.rs/adf_bdd/latest/adf_bdd/)
![GitHub Workflow Status](https://img.shields.io/github/workflow/status/ellmau/adf-obdd/Code%20coverage%20with%20tarpaulin)
[![Coveralls](https://img.shields.io/coveralls/github/ellmau/adf-obdd)](https://coveralls.io/github/ellmau/adf-obdd)
![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/ellmau/adf-obdd?include_prereleases)
![GitHub (Pre-)Release Date](https://img.shields.io/github/release-date-pre/ellmau/adf-obdd?label=release%20from) ![GitHub top language](https://img.shields.io/github/languages/top/ellmau/adf-obdd)
[![GitHub all releases](https://img.shields.io/github/downloads/ellmau/adf-obdd/total)](https://github.com/ellmau/adf-obdd/releases)
![Crates.io](https://img.shields.io/crates/l/adf_bdd)
[![GitHub Discussions](https://img.shields.io/github/discussions/ellmau/adf-obdd)](https://github.com/ellmau/adf-obdd/discussions) ![rust-edition](https://img.shields.io/badge/Rust--edition-2021-blue?logo=rust)
| [Home](index.md) | [Binary](adf-bdd.md) | [Library](adf_bdd.md)| [Repository](https://github.com/ellmau/adf-obdd) |
|--- | --- | --- | --- |
# Abstract Dialectical Frameworks solved by (ordered) Binary Decision Diagrams; developed in Dresden (ADF-oBDD project)
This project is currently split into two parts:
- a [binary (adf-bdd)](adf-bdd.md), which allows one to easily answer semantics questions on abstract dialectical frameworks
- a [library (adf_bdd)](adf_bdd.md), which contains all the necessary algorithms and an open API which computes the answers to the semantics questions
Latest documentation of the API can be found [here](https://docs.rs/adf_bdd/latest/adf_bdd/).
The current version of the binary can be downloaded [here](https://github.com/ellmau/adf-obdd/releases).
Do not hesitate to report bugs or ask about features in the [issues-section](https://github.com/ellmau/adf-obdd/issues) or have a conversation about anything related to the project in the [discussion space](https://github.com/ellmau/adf-obdd/discussions).
## Abstract Dialectical Frameworks
An abstract dialectical framework (ADF) consists of abstract statements. Each statement has a unique label and might be related to other statements in the ADF. This relation is defined by a so-called acceptance condition (ac), which intuitively is a propositional formula whose variable symbols are the labels of the statements. An interpretation is a three-valued function which maps each statement to a truth value (true, false, undecided). We call such an interpretation a model if each acceptance condition agrees with the interpretation; for instance, a statement whose acceptance condition is the constant c(v) must be mapped to true in every model.
## Ordered Binary Decision Diagram
An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheaply.
## Input-file format:
Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
- and(x,y): conjunction
- or(x,y): disjunction
- iff(x,y): if and only if
- xor(x,y): exclusive or
- neg(x): classical negation
- c(v): constant symbol "verum" - tautology/top
- c(f): constant symbol "falsum" - inconsistency/bot
# Features
- `adhoccounting` will cache the modelcount on-the-fly during the construction of the BDD
- `adhoccountmodels` additionally allows computing the models ad-hoc. Note that the memoization approach for modelcounting does not work correctly if `adhoccounting` is set and `adhoccountmodels` is not.
# Development notes
Additional information for contribution, testing, and development in general can be found here.
## Contributing to the project
You want to help and contribute to the project? That is great. Please see the [contributing guidelines](https://github.com/ellmau/adf-obdd/blob/main/.github/CONTRIBUTING.md) first.
# Acknowledgements
This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in projects number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
# Affiliation
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden.

flake.lock (generated): 88 changed lines

@ -1,43 +1,12 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1650374568,
"narHash": "sha256-Z+s0J8/r907g149rllvwhb4pKi8Wam5ij0st8PwAh+E=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "b4a34015c698c7793d592d66adbab377907a2be8",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"locked": {
"lastModified": 1652776076,
"narHash": "sha256-gzTw/v1vj4dOVbpBSJX4J0DwUR6LIyXo7/SuuTJp1kM=",
"lastModified": 1656928814,
"narHash": "sha256-RIFfgBuKz6Hp89yRr7+NR5tzIAbn52h8vT6vXkYjZoM=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "04c1b180862888302ddfb2e3ad9eaa63afc60cf8",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"locked": {
"lastModified": 1637014545,
"narHash": "sha256-26IZAc5yzlD9FlDT54io1oqG/bBoyka+FJk5guaX4x4=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "bba5dcc8e0b20ab664967ad83d24d64cb64ec4f4",
"rev": "7e2a3b3dfd9af950a856d66b0a7d01e3c18aa249",
"type": "github"
},
"original": {
@ -49,11 +18,11 @@
"gitignoresrc": {
"flake": false,
"locked": {
"lastModified": 1646480205,
"narHash": "sha256-kekOlTlu45vuK2L9nq8iVN17V3sB0WWPqTTW3a2SQG0=",
"lastModified": 1658402513,
"narHash": "sha256-wk38v/mbLsOo6+IDmmH1H0ADR87iq9QTTD1BP9X2Ags=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "bff2832ec341cf30acb3a4d3e2e7f1f7b590116a",
"rev": "f2ea0f8ff1bce948ccb6b893d15d5ea3efaf1364",
"type": "github"
},
"original": {
@ -64,27 +33,27 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1653087707,
"narHash": "sha256-zfno3snrzZTWQ2B7K53QHrGZwrjnJLTRPalymrSsziU=",
"lastModified": 1659342832,
"narHash": "sha256-ePnxG4hacRd6oZMk+YeCSYMNUnHCe+qPLI0/+VaTu48=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "cbd40c72b2603ab54e7208f99f9b35fc158bc009",
"rev": "e43cf1748462c81202a32b26294e9f8eefcc3462",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-21.11",
"ref": "nixos-22.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1653060744,
"narHash": "sha256-kfRusllRumpt33J1hPV+CeCCylCXEU7e0gn2/cIM7cY=",
"lastModified": 1659219666,
"narHash": "sha256-pzYr5fokQPHv7CmUXioOhhzDy/XyWOIXP4LZvv/T7Mk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "dfd82985c273aac6eced03625f454b334daae2e8",
"rev": "7b9be38c7250b22d829ab6effdee90d5e40c6e5c",
"type": "github"
},
"original": {
@ -94,25 +63,8 @@
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1637453606,
"narHash": "sha256-Gy6cwUswft9xqsjWxFYEnx/63/qzaFUwatcbV5GF/GQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "8afc4e543663ca0a6a4f496262cd05233737e732",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"gitignoresrc": "gitignoresrc",
"nixpkgs": "nixpkgs",
@ -122,15 +74,19 @@
},
"rust-overlay": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_2"
"flake-utils": [
"flake-utils"
],
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1653360353,
"narHash": "sha256-WA1eevoRzs3LfTA+XWNj29thaoh4wN8HJSRs48Q2ICU=",
"lastModified": 1659409092,
"narHash": "sha256-OBY2RCYZeeOA3FTYUb86BPMUBEyKEwpwhpU2QKboRJQ=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "c5486f823e4af2c0dfb5b2673002d5c973ef5209",
"rev": "9055cb4f33f062c0dd33aa7e3c89140da8f70057",
"type": "github"
},
"original": {

flake.nix

@ -2,21 +2,23 @@
description = "basic rust flake";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.11";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.05";
nixpkgs-unstable.url = "github:NixOS/nixpkgs/nixos-unstable";
rust-overlay.url = "github:oxalica/rust-overlay";
flake-utils.url = "github:numtide/flake-utils";
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
rust-overlay = {
url = "github:oxalica/rust-overlay";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-utils.follows = "flake-utils";
};
};
flake-utils.url = "github:numtide/flake-utils";
gitignoresrc = {
url = "github:hercules-ci/gitignore.nix";
flake = false;
};
};
outputs = { self, nixpkgs, nixpkgs-unstable, flake-utils, flake-compat, gitignoresrc, rust-overlay, ... }@inputs:
outputs = { self, nixpkgs, nixpkgs-unstable, flake-utils, gitignoresrc, rust-overlay, ... }@inputs:
{
#overlay = import ./nix { inherit gitignoresrc; };
} // (flake-utils.lib.eachDefaultSystem (system:
@ -33,13 +35,15 @@
RUST_LOG = "debug";
RUST_BACKTRACE = 1;
buildInputs = [
pkgs.rust-bin.nightly.latest.rustfmt
pkgs.rust-bin.stable.latest.rustfmt
pkgs.rust-bin.stable.latest.default
unstable.rust-analyzer
pkgs.rust-analyzer
pkgs.cargo-audit
pkgs.cargo-license
pkgs.cargo-tarpaulin
pkgs.cargo-kcov
pkgs.valgrind
pkgs.gnuplot
pkgs.kcov
];
};

lib/Cargo.toml

@ -1,6 +1,6 @@
[package]
name = "adf_bdd"
version = "0.2.4"
version = "0.3.0"
authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
edition = "2021"
homepage = "https://ellmau.github.io/adf-obdd/"
@ -30,6 +30,9 @@ serde = { version = "1.0", features = ["derive","rc"] }
serde_json = "1.0"
biodivine-lib-bdd = "0.4.0"
derivative = "2.2.0"
roaring = "0.9.0"
strum = { version = "0.24", features = ["derive"] }
crossbeam-channel = "0.5"
[dev-dependencies]
test-log = "0.2"
@ -39,7 +42,9 @@ quickcheck_macros = "1"
[features]
default = ["adhoccounting", "variablelist" ]
adhoccounting = [] # count models ad-hoc - disable if counting is not needed
adhoccounting = [] # count paths ad-hoc - disable if counting is not needed
importexport = []
variablelist = [ "HashSet" ]
HashSet = []
adhoccountmodels = [ "adhoccounting" ] # count models as well as paths ad-hoc note that facet methods will need this feature too
benchmark = ["adhoccounting", "variablelist"] # set of features for speed benchmarks

lib/README.md

@ -122,6 +122,39 @@ for model in adf.complete() {
}
```
use the new `NoGood`-based algorithm and utilise the new interface with channels:
```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::datatypes::{Term, adf::VarContainer};
// create a channel
let (s, r) = crossbeam_channel::unbounded();
let variables = VarContainer::default();
let variables_worker = variables.clone();
// spawn a solver thread
let solving = std::thread::spawn(move || {
// use the above example as input
let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
let parser = AdfParser::with_var_container(variables_worker);
parser.parse()(&input).expect("parsing worked well");
// use hybrid approach
let mut adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser).hybrid_step();
// compute stable with the simple heuristic
adf.stable_nogood_channel(Heuristic::Simple, s);
});
let printer = variables.print_dictionary();
// print results as they are computed
while let Ok(result) = r.recv() {
print!("stable model: {:?} \n", result);
// use dictionary
print!("stable model with variable names: {}", printer.print_interpretation(&result));
# assert_eq!(result, vec![Term(1),Term(1),Term(0),Term(0)]);
}
// waiting for the other thread to close
solving.join().unwrap();
```
# Acknowledgements
This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in projects number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
@ -129,7 +162,7 @@ the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Educ
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
# Affiliation
This work has been party developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden
Hosting content here does not establish any formal or legal relation to TU Dresden.

lib/src/adf.rs

@ -5,8 +5,7 @@ This module describes the abstract dialectical framework.
- computing fixpoints
*/
use serde::{Deserialize, Serialize};
pub mod heuristics;
use crate::{
datatypes::{
adf::{
@ -15,9 +14,13 @@ use crate::{
},
FacetCounts, ModelCounts, Term, Var,
},
nogoods::{NoGood, NoGoodStore},
obdd::Bdd,
parser::{AdfParser, Formula},
};
use serde::{Deserialize, Serialize};
use self::heuristics::Heuristic;
#[derive(Serialize, Deserialize, Debug)]
/// Representation of an ADF, with an ordering and dictionary which relates statements to numbers, a binary decision diagram, and a list of acceptance conditions in [`Term`][crate::datatypes::Term] representation.
@ -44,19 +47,14 @@ impl Adf {
pub fn from_parser(parser: &AdfParser) -> Self {
log::info!("[Start] instantiating BDD");
let mut result = Self {
ordering: VarContainer::from_parser(
parser.namelist_rc_refcell(),
parser.dict_rc_refcell(),
),
ordering: parser.var_container(),
bdd: Bdd::new(),
ac: vec![Term(0); parser.namelist_rc_refcell().as_ref().borrow().len()],
ac: vec![Term(0); parser.dict_size()],
};
(0..parser.namelist_rc_refcell().borrow().len())
.into_iter()
.for_each(|value| {
log::trace!("adding variable {}", Var(value));
result.bdd.variable(Var(value));
});
(0..parser.dict_size()).into_iter().for_each(|value| {
log::trace!("adding variable {}", Var(value));
result.bdd.variable(Var(value));
});
log::debug!("[Start] adding acs");
parser
.formula_order()
@ -84,7 +82,7 @@ impl Adf {
bio_ac: &[biodivine_lib_bdd::Bdd],
) -> Self {
let mut result = Self {
ordering: VarContainer::copy(ordering),
ordering: ordering.clone(),
bdd: Bdd::new(),
ac: vec![Term(0); bio_ac.len()],
};
@ -131,6 +129,8 @@ impl Adf {
}
}
});
log::trace!("ordering: {:?}", result.ordering);
log::trace!("adf {:?} instantiated with bdd {}", result.ac, result.bdd);
result
}
@ -405,6 +405,10 @@ impl Adf {
true
}
fn is_two_valued(&self, interpretation: &[Term]) -> bool {
interpretation.iter().all(|t| t.is_truth_value())
}
fn two_val_model_counts<H>(&mut self, interpr: &[Term], heuristic: H) -> Vec<Vec<Term>>
where
H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy,
@ -590,6 +594,26 @@ impl Adf {
}
}
/// Constructs the fixpoint of the given interpretation with respect to the ADF.
/// Sets _update_ to [`true`] if the value has been updated and to [`false`] otherwise.
fn update_interpretation_fixpoint_upd(
&mut self,
interpretation: &[Term],
update: &mut bool,
) -> Vec<Term> {
let mut cur_int = interpretation.to_vec();
*update = false;
loop {
let new_int = self.update_interpretation(&cur_int);
if cur_int == new_int {
return cur_int;
} else {
cur_int = new_int;
*update = true;
}
}
}
fn update_interpretation(&mut self, interpretation: &[Term]) -> Vec<Term> {
self.apply_interpretation(interpretation, interpretation)
}
@ -681,7 +705,7 @@ impl Adf {
interpretation
.iter()
.map(|t| {
let mcs = self.bdd.models(*t, true);
let mcs = self.bdd.models(*t, false);
let n_vdps = { |t| self.bdd.var_dependencies(t).len() };
@ -697,11 +721,193 @@ impl Adf {
})
.collect::<Vec<_>>()
}
/// Computes the stable extensions of a given [`Adf`], using the [`NoGood`]-learner.
pub fn stable_nogood<'a, 'c>(
&'a mut self,
heuristic: Heuristic,
) -> impl Iterator<Item = Vec<Term>> + 'c
where
'a: 'c,
{
let grounded = self.grounded();
let heu = heuristic.get_heuristic();
let (s, r) = crossbeam_channel::unbounded::<Vec<Term>>();
self.stable_nogood_get_vec(&grounded, heu, s, r).into_iter()
}
/// Computes the stable extension of a given [`Adf`], using the [`NoGood`]-learner.
/// Needs a [`Sender`][crossbeam_channel::Sender<Vec<crate::datatypes::Term>>] to which the results of the computation can be sent.
pub fn stable_nogood_channel(
&mut self,
heuristic: Heuristic,
sender: crossbeam_channel::Sender<Vec<Term>>,
) {
let grounded = self.grounded();
self.nogood_internal(
&grounded,
heuristic.get_heuristic(),
Self::stability_check,
sender,
);
}
/// Computes the two valued extension of a given [`Adf`], using the [`NoGood`]-learner.
/// Needs a [`Sender`][crossbeam_channel::Sender<Vec<crate::datatypes::Term>>] to which the results of the computation can be sent.
pub fn two_val_nogood_channel(
&mut self,
heuristic: Heuristic,
sender: crossbeam_channel::Sender<Vec<Term>>,
) {
let grounded = self.grounded();
self.nogood_internal(
&grounded,
heuristic.get_heuristic(),
|_self: &mut Self, _int: &[Term]| true,
sender,
)
}
fn stable_nogood_get_vec<H>(
&mut self,
interpretation: &[Term],
heuristic: H,
s: crossbeam_channel::Sender<Vec<Term>>,
r: crossbeam_channel::Receiver<Vec<Term>>,
) -> Vec<Vec<Term>>
where
H: Fn(&Self, &[Term]) -> Option<(Var, Term)>,
{
self.nogood_internal(interpretation, heuristic, Self::stability_check, s);
r.iter().collect()
}
fn nogood_internal<H, I>(
&mut self,
interpretation: &[Term],
heuristic: H,
stability_check: I,
s: crossbeam_channel::Sender<Vec<Term>>,
) where
H: Fn(&Self, &[Term]) -> Option<(Var, Term)>,
I: Fn(&mut Self, &[Term]) -> bool,
{
let mut cur_interpr = interpretation.to_vec();
let mut ng_store = NoGoodStore::new(
self.ac
.len()
.try_into()
.expect("Expecting only u32 many statements"),
);
let mut stack: Vec<(bool, NoGood)> = Vec::new();
let mut interpr_history: Vec<Vec<Term>> = Vec::new();
let mut backtrack = false;
let mut update_ng;
let mut update_fp = false;
let mut choice = false;
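// Flags driving the learning loop:
// - `choice`: ask the heuristic for the next guess; if none remains, backtrack.
// - `backtrack`: pop the stack into the nogood store up to the last choice
//   point, restoring the interpretation saved in `interpr_history`.
// - `update_ng`: the nogood closure updated the interpretation this round.
// - `update_fp`: the fixpoint computation updated the interpretation.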
log::debug!("start learning loop");
loop {
log::trace!("interpr: {:?}", cur_interpr);
log::trace!("choice: {}", choice);
if choice {
choice = false;
if let Some((var, term)) = heuristic(&*self, &cur_interpr) {
log::trace!("choose {}->{}", var, term.is_true());
interpr_history.push(cur_interpr.to_vec());
cur_interpr[var.value()] = term;
stack.push((true, cur_interpr.as_slice().into()));
} else {
backtrack = true;
}
}
update_ng = true;
log::trace!("backtrack: {}", backtrack);
if backtrack {
backtrack = false;
if stack.is_empty() {
break;
}
while let Some((choice, ng)) = stack.pop() {
log::trace!("adding ng: {:?}", ng);
ng_store.add_ng(ng);
if choice {
cur_interpr = interpr_history.pop().expect("both stacks (interpr_history and `stack`) should always be synchronous");
log::trace!(
"choice found, reverting interpretation to {:?}",
cur_interpr
);
break;
}
}
}
match ng_store.conclusion_closure(&cur_interpr) {
crate::nogoods::ClosureResult::Update(new_int) => {
cur_interpr = new_int;
log::trace!("ng update: {:?}", cur_interpr);
stack.push((false, cur_interpr.as_slice().into()));
}
crate::nogoods::ClosureResult::NoUpdate => {
log::trace!("no update");
update_ng = false;
}
crate::nogoods::ClosureResult::Inconsistent => {
log::trace!("inconsistency");
backtrack = true;
continue;
}
}
let ac_consistent_interpr = self.apply_interpretation(&self.ac.clone(), &cur_interpr);
log::trace!(
"checking consistency of {:?} against {:?}",
ac_consistent_interpr,
cur_interpr
);
if cur_interpr
.iter()
.zip(ac_consistent_interpr.iter())
.any(|(cur, ac)| {
cur.is_truth_value() && ac.is_truth_value() && cur.is_true() != ac.is_true()
})
{
log::trace!("ac_inconsistency");
backtrack = true;
continue;
}
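// Fixpoint phase: apply the acceptance conditions to the interpretation; once neither the nogoods nor the fixpoint operator change anything, the interpretation is extended by a choice or reported as a model.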
cur_interpr = self.update_interpretation_fixpoint_upd(&cur_interpr, &mut update_fp);
if update_fp {
log::trace!("fixpount updated");
//stack.push((false, cur_interpr.as_slice().into()));
} else if !update_ng {
// No updates done this loop
if !self.is_two_valued(&cur_interpr) {
choice = true;
} else if stability_check(self, &cur_interpr) {
// stable model found
stack.push((false, cur_interpr.as_slice().into()));
s.send(cur_interpr.clone())
.expect("Sender should accept results");
backtrack = true;
} else {
// not stable
log::trace!("2 val not stable");
stack.push((false, cur_interpr.as_slice().into()));
backtrack = true;
}
}
}
log::info!("{ng_store}");
log::debug!("{:?}", ng_store);
}
}
#[cfg(test)]
mod test {
use super::*;
use crossbeam_channel::unbounded;
use test_log::test;
#[test]
@ -712,11 +918,16 @@ mod test {
parser.parse()(input).unwrap();
let adf = Adf::from_parser(&parser);
assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a");
assert_eq!(adf.ordering.names().as_ref().borrow()[1], "c");
assert_eq!(adf.ordering.names().as_ref().borrow()[2], "b");
assert_eq!(adf.ordering.names().as_ref().borrow()[3], "e");
assert_eq!(adf.ordering.names().as_ref().borrow()[4], "d");
assert_eq!(adf.ordering.name(Var(0)), Some("a".to_string()));
assert_eq!(adf.ordering.names().read().unwrap()[0], "a");
assert_eq!(adf.ordering.name(Var(1)), Some("c".to_string()));
assert_eq!(adf.ordering.names().read().unwrap()[1], "c");
assert_eq!(adf.ordering.name(Var(2)), Some("b".to_string()));
assert_eq!(adf.ordering.names().read().unwrap()[2], "b");
assert_eq!(adf.ordering.name(Var(3)), Some("e".to_string()));
assert_eq!(adf.ordering.names().read().unwrap()[3], "e");
assert_eq!(adf.ordering.name(Var(4)), Some("d".to_string()));
assert_eq!(adf.ordering.names().read().unwrap()[4], "d");
assert_eq!(adf.ac, vec![Term(4), Term(2), Term(7), Term(15), Term(12)]);
@ -727,11 +938,11 @@ mod test {
parser.varsort_alphanum();
let adf = Adf::from_parser(&parser);
assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a");
assert_eq!(adf.ordering.names().as_ref().borrow()[1], "b");
assert_eq!(adf.ordering.names().as_ref().borrow()[2], "c");
assert_eq!(adf.ordering.names().as_ref().borrow()[3], "d");
assert_eq!(adf.ordering.names().as_ref().borrow()[4], "e");
assert_eq!(adf.ordering.names().read().unwrap()[0], "a");
assert_eq!(adf.ordering.names().read().unwrap()[1], "b");
assert_eq!(adf.ordering.names().read().unwrap()[2], "c");
assert_eq!(adf.ordering.names().read().unwrap()[3], "d");
assert_eq!(adf.ordering.names().read().unwrap()[4], "e");
assert_eq!(adf.ac, vec![Term(3), Term(7), Term(2), Term(11), Term(13)]);
}
@ -882,6 +1093,145 @@ mod test {
assert_eq!(adf.stable_count_optimisation_heu_b().next(), None);
}
#[test]
fn stable_nogood() {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
.unwrap();
let mut adf = Adf::from_parser(&parser);
let grounded = adf.grounded();
let (s, r) = unbounded();
adf.nogood_internal(
&grounded,
crate::adf::heuristics::heu_simple,
crate::adf::Adf::stability_check,
s,
);
assert_eq!(
r.iter().collect::<Vec<_>>(),
vec![vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
]]
);
let mut stable_iter = adf.stable_nogood(Heuristic::Simple);
assert_eq!(
stable_iter.next(),
Some(vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
])
);
assert_eq!(stable_iter.next(), None);
let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap();
let mut adf = Adf::from_parser(&parser);
let grounded = adf.grounded();
let (s, r) = unbounded();
adf.nogood_internal(
&grounded,
crate::adf::heuristics::heu_simple,
crate::adf::Adf::stability_check,
s.clone(),
);
let stable_result = r.try_iter().collect::<Vec<_>>();
assert_eq!(
stable_result,
vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]
);
let stable = adf.stable_nogood(Heuristic::Simple);
assert_eq!(
stable.collect::<Vec<_>>(),
vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]
);
let stable = adf.stable_nogood(Heuristic::Custom(&|_adf, interpr| {
for (idx, term) in interpr.iter().enumerate() {
if !term.is_truth_value() {
return Some((Var(idx), Term::BOT));
}
}
None
}));
assert_eq!(
stable.collect::<Vec<_>>(),
vec![vec![Term(0), Term(1)], vec![Term(1), Term(0)]]
);
adf.stable_nogood_channel(Heuristic::default(), s);
assert_eq!(
r.iter().collect::<Vec<_>>(),
vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]
);
// multi-threaded usage
let (s, r) = unbounded();
let solving = std::thread::spawn(move || {
let parser = AdfParser::default();
parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
.unwrap();
let mut adf = Adf::from_parser(&parser);
adf.stable_nogood_channel(Heuristic::MinModMaxVarImpMinPaths, s.clone());
adf.stable_nogood_channel(Heuristic::MinModMinPathsMaxVarImp, s.clone());
adf.two_val_nogood_channel(Heuristic::Simple, s)
});
let mut result_vec = Vec::new();
while let Ok(result) = r.recv() {
result_vec.push(result);
}
assert_eq!(
result_vec,
vec![
vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
],
vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
],
vec![
Term::TOP,
Term::TOP,
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP
],
vec![
Term::TOP,
Term::BOT,
Term::BOT,
Term::TOP,
Term::BOT,
Term::TOP
],
]
);
solving.join().unwrap();
}
#[test]
fn complete() {
let parser = AdfParser::default();
@ -924,6 +1274,7 @@ mod test {
}
}
#[cfg(feature = "adhoccountmodels")]
#[test]
fn formulacounts() {
let parser = AdfParser::default();

139
lib/src/adf/heuristics.rs Normal file
View File

@ -0,0 +1,139 @@
/*!
This module contains all the crate-wide defined heuristic functions.
In addition, the public enum [Heuristic] allows setting a heuristic function via the public API.
*/
use super::Adf;
use crate::datatypes::{Term, Var};
use strum::{EnumString, EnumVariantNames};
/// Return value for heuristics.
pub type RetVal = Option<(Var, Term)>;
/// Signature for heuristics functions.
pub type HeuristicFn = dyn Fn(&Adf, &[Term]) -> RetVal;
pub(crate) fn heu_simple(_adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
for (idx, term) in interpr.iter().enumerate() {
if !term.is_truth_value() {
return Some((Var(idx), Term::TOP));
}
}
None
}
pub(crate) fn heu_mc_minpaths_maxvarimp(adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
interpr
.iter()
.enumerate()
.filter(|(_var, term)| !term.is_truth_value())
.min_by(|(vara, &terma), (varb, &termb)| {
match adf
.bdd
.paths(terma, true)
.minimum()
.cmp(&adf.bdd.paths(termb, true).minimum())
{
std::cmp::Ordering::Equal => adf
.bdd
.passive_var_impact(Var::from(*vara), interpr)
.cmp(&adf.bdd.passive_var_impact(Var::from(*varb), interpr)),
value => value,
}
})
.map(|(var, term)| {
(
Var::from(var),
adf.bdd.paths(*term, true).more_models().into(),
)
})
}
pub(crate) fn heu_mc_maxvarimp_minpaths(adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
interpr
.iter()
.enumerate()
.filter(|(_var, term)| !term.is_truth_value())
.min_by(|(vara, &terma), (varb, &termb)| {
match adf
.bdd
.passive_var_impact(Var::from(*vara), interpr)
.cmp(&adf.bdd.passive_var_impact(Var::from(*varb), interpr))
{
std::cmp::Ordering::Equal => adf
.bdd
.paths(terma, true)
.minimum()
.cmp(&adf.bdd.paths(termb, true).minimum()),
value => value,
}
})
.map(|(var, term)| {
(
Var::from(var),
adf.bdd.paths(*term, true).more_models().into(),
)
})
}
/// Enumeration of all currently implemented heuristics.
/// It represents a public view on the crate-wide implementations of heuristics.
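///
/// Since [Heuristic] derives strum's `EnumString`, the pre-defined variants can be parsed from their names, e.g. when selecting a heuristic via a command-line flag (a minimal sketch):
/// ```rust
/// use std::str::FromStr;
/// use adf_bdd::adf::heuristics::Heuristic;
///
/// // `Custom` is excluded from parsing via `#[strum(disabled)]`
/// let heu = Heuristic::from_str("MinModMinPathsMaxVarImp").expect("a known variant name");
/// println!("{:?}", heu);
/// ```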
#[derive(EnumString, EnumVariantNames, Copy, Clone)]
pub enum Heuristic<'a> {
/// Implementation of a simple heuristic.
/// It chooses the first undecided variable and maps its value to [`true`][Term::TOP].
Simple,
/// Implementation of a heuristic which uses the minimal number of [paths][crate::obdd::Bdd::paths] and the maximal [variable-impact][crate::obdd::Bdd::passive_var_impact] to identify the variable to be set.
/// As the value, the truth value with the maximal number of model-paths is chosen.
MinModMinPathsMaxVarImp,
/// Implementation of a heuristic which uses the maximal [variable-impact][crate::obdd::Bdd::passive_var_impact] and the minimal number of [paths][crate::obdd::Bdd::paths] to identify the variable to be set.
/// As the value, the truth value with the maximal number of model-paths is chosen.
MinModMaxVarImpMinPaths,
/// Allow passing in an externally-defined custom heuristic.
#[strum(disabled)]
Custom(&'a HeuristicFn),
}
impl Default for Heuristic<'_> {
fn default() -> Self {
Self::Simple
}
}
impl std::fmt::Debug for Heuristic<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Simple => write!(f, "Simple"),
Self::MinModMinPathsMaxVarImp => write!(f, "Maximal model-path count as value and minimum paths with maximal variable impact as variable choice"),
Self::MinModMaxVarImpMinPaths => write!(f, "Maximal model-path count as value and maximal variable impact with minimum paths as variable choice"),
Self::Custom(_) => f.debug_tuple("Custom function").finish(),
}
}
}
impl Heuristic<'_> {
pub(crate) fn get_heuristic(&self) -> &(dyn Fn(&Adf, &[Term]) -> RetVal + '_) {
match self {
Heuristic::Simple => &heu_simple,
Heuristic::MinModMinPathsMaxVarImp => &heu_mc_minpaths_maxvarimp,
Heuristic::MinModMaxVarImpMinPaths => &heu_mc_maxvarimp_minpaths,
Self::Custom(f) => f,
}
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::datatypes::Term;
use crate::datatypes::Var;
#[test]
fn debug_out() {
dbg!(Heuristic::Simple);
dbg!(Heuristic::MinModMaxVarImpMinPaths);
dbg!(Heuristic::MinModMinPathsMaxVarImp);
dbg!(Heuristic::Custom(&|_adf: &Adf,
_int: &[Term]|
-> Option<(Var, Term)> { None }));
}
}

View File

@ -40,19 +40,17 @@ impl Adf {
pub fn from_parser(parser: &AdfParser) -> Self {
log::info!("[Start] instantiating BDD");
let mut bdd_var_builder = biodivine_lib_bdd::BddVariableSetBuilder::new();
let namelist = parser.namelist_rc_refcell().as_ref().borrow().clone();
let namelist = parser
.namelist()
.read()
.expect("ReadLock on namelist failed")
.clone();
let slice_vec: Vec<&str> = namelist.iter().map(<_>::as_ref).collect();
bdd_var_builder.make_variables(&slice_vec);
let bdd_variables = bdd_var_builder.build();
let mut result = Self {
ordering: VarContainer::from_parser(
parser.namelist_rc_refcell(),
parser.dict_rc_refcell(),
),
ac: vec![
bdd_variables.mk_false();
parser.namelist_rc_refcell().as_ref().borrow().len()
],
ordering: parser.var_container(),
ac: vec![bdd_variables.mk_false(); parser.dict_size()],
vars: bdd_variables.variables(),
varset: bdd_variables,
rewrite: None,
@ -319,6 +317,7 @@ impl Adf {
.collect::<Vec<Vec<Term>>>()
}
/// compute the stable representation
fn stable_representation(&self) -> Bdd {
log::debug!("[Start] stable representation rewriting");
self.ac.iter().enumerate().fold(

View File

@ -2,49 +2,66 @@
use super::{Term, Var};
use serde::{Deserialize, Serialize};
use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc};
use std::{collections::HashMap, fmt::Display, sync::Arc, sync::RwLock};
#[derive(Serialize, Deserialize, Debug)]
pub(crate) struct VarContainer {
names: Rc<RefCell<Vec<String>>>,
mapping: Rc<RefCell<HashMap<String, usize>>>,
/// A container which acts as a dictionary as well as an ordering of variables.
/// *names* is a list of variable-names, and the sequence of its values induces the order of the variables.
/// *mapping* allows searching for a variable name to obtain the corresponding position in the variable list (`names`).
///
/// # Important note
/// If a [VarContainer] is used to instantiate an [Adf][crate::adf::Adf] (resp. a [Biodivine Adf][crate::adfbiodivine::Adf]), any later revision other than adding more information might result in a wrong variable-name mapping when printing the output using the [PrintDictionary].
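///
/// # Example
/// A minimal sketch of sharing one [VarContainer] between threads (the input is illustrative); cloning only copies the shared `Arc` handles:
/// ```rust
/// use adf_bdd::datatypes::{adf::VarContainer, Var};
/// use adf_bdd::parser::AdfParser;
///
/// let variables = VarContainer::default();
/// let for_worker = variables.clone(); // clones the inner `Arc`s, sharing the data
/// let worker = std::thread::spawn(move || {
///     let parser = AdfParser::with_var_container(for_worker);
///     parser.parse()("s(a).s(b).ac(a,c(v)).ac(b,a).").unwrap();
/// });
/// worker.join().unwrap();
/// // the main thread sees the names registered by the worker thread
/// assert_eq!(variables.name(Var(0)), Some("a".to_string()));
/// ```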
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VarContainer {
names: Arc<RwLock<Vec<String>>>,
mapping: Arc<RwLock<HashMap<String, usize>>>,
}
impl Default for VarContainer {
fn default() -> Self {
VarContainer {
names: Rc::new(RefCell::new(Vec::new())),
mapping: Rc::new(RefCell::new(HashMap::new())),
names: Arc::new(RwLock::new(Vec::new())),
mapping: Arc::new(RwLock::new(HashMap::new())),
}
}
}
impl VarContainer {
pub fn from_parser(
names: Rc<RefCell<Vec<String>>>,
mapping: Rc<RefCell<HashMap<String, usize>>>,
pub(crate) fn from_parser(
names: Arc<RwLock<Vec<String>>>,
mapping: Arc<RwLock<HashMap<String, usize>>>,
) -> VarContainer {
VarContainer { names, mapping }
}
pub fn copy(from: &Self) -> Self {
VarContainer {
names: from.names.clone(),
mapping: from.mapping.clone(),
}
}
/// Get the [Var] used by the `Bdd` which corresponds to the given [&str].
/// Returns [None] if no matching value is found.
pub fn variable(&self, name: &str) -> Option<Var> {
self.mapping.borrow().get(name).map(|val| Var(*val))
self.mapping
.read()
.ok()
.and_then(|map| map.get(name).map(|val| Var(*val)))
}
/// Get the name which corresponds to the given [Var].
/// Returns [None] if no matching value is found.
pub fn name(&self, var: Var) -> Option<String> {
self.names.borrow().get(var.value()).cloned()
self.names
.read()
.ok()
.and_then(|name| name.get(var.value()).cloned())
}
#[allow(dead_code)]
pub fn names(&self) -> Rc<RefCell<Vec<String>>> {
Rc::clone(&self.names)
pub(crate) fn names(&self) -> Arc<RwLock<Vec<String>>> {
Arc::clone(&self.names)
}
pub(crate) fn mappings(&self) -> Arc<RwLock<HashMap<String, usize>>> {
Arc::clone(&self.mapping)
}
/// Creates a [PrintDictionary] for output purposes.
pub fn print_dictionary(&self) -> PrintDictionary {
PrintDictionary::new(self)
}
}
/// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations.
@ -56,7 +73,7 @@ pub struct PrintDictionary {
impl PrintDictionary {
pub(crate) fn new(order: &VarContainer) -> Self {
Self {
ordering: VarContainer::copy(order),
ordering: order.clone(),
}
}
/// creates a [PrintableInterpretation] for output purposes

View File

@ -25,6 +25,16 @@ impl From<usize> for Term {
}
}
impl From<bool> for Term {
fn from(val: bool) -> Self {
if val {
Self::TOP
} else {
Self::BOT
}
}
}
impl From<&biodivine_lib_bdd::Bdd> for Term {
fn from(val: &biodivine_lib_bdd::Bdd) -> Self {
if val.is_true() {

View File

@ -159,6 +159,44 @@ let printer = adf.print_dictionary();
for model in adf.complete() {
print!("{}", printer.print_interpretation(&model));
}
```
### Using the [`NoGood`][crate::nogoods::NoGood]-learner approach, together with the [`crossbeam-channel`] implementation
This can be used to have a worker thread compute the models and a consumer thread print the results as they are computed.
Please note that the [`NoGood`][crate::nogoods::NoGood]-learner needs a heuristic function to work.
The enum [`Heuristic`][crate::adf::heuristics::Heuristic] allows one to choose a pre-defined heuristic, or implement a `Custom` one.
```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::datatypes::{Term, adf::VarContainer};
// create a channel
let (s, r) = crossbeam_channel::unbounded();
let variables = VarContainer::default();
let variables_worker = variables.clone();
// spawn a solver thread
let solving = std::thread::spawn(move || {
// use the above example as input
let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
let parser = AdfParser::with_var_container(variables_worker);
parser.parse()(&input).expect("parsing worked well");
// use hybrid approach
let mut adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser).hybrid_step();
// compute stable with the simple heuristic
adf.stable_nogood_channel(Heuristic::Simple, s);
});
let printer = variables.print_dictionary();
// print results as they are computed
while let Ok(result) = r.recv() {
print!("stable model: {:?} \n", result);
// use dictionary
print!("stable model with variable names: {}", printer.print_interpretation(&result));
# assert_eq!(result, vec![Term(1),Term(1),Term(0),Term(0)]);
}
// waiting for the other thread to close
solving.join().unwrap();
```
*/
#![deny(
@ -180,8 +218,8 @@ for model in adf.complete() {
pub mod adf;
pub mod adfbiodivine;
pub mod datatypes;
pub mod nogoods;
pub mod obdd;
pub mod parser;
#[cfg(test)]
mod test;
//pub mod obdd2;

820
lib/src/nogoods.rs Normal file
View File

@ -0,0 +1,820 @@
//! Collection of all nogood-related structures.
use std::{
borrow::Borrow,
fmt::{Debug, Display},
ops::{BitAnd, BitOr, BitXor, BitXorAssign},
};
use crate::datatypes::Term;
use roaring::RoaringBitmap;
/// A [NoGood] and an [Interpretation] can be represented by the same structure.
/// Moreover, this duality (i.e. an [Interpretation] may become a [NoGood]) is reflected by this type alias.
pub type Interpretation = NoGood;
/// Representation of a nogood by a pair of [Bitmaps][RoaringBitmap]
#[derive(Debug, Default, Clone)]
pub struct NoGood {
active: RoaringBitmap,
value: RoaringBitmap,
}
impl Eq for NoGood {}
impl PartialEq for NoGood {
fn eq(&self, other: &Self) -> bool {
self.active
.borrow()
.bitxor(other.active.borrow())
.is_empty()
&& self.value.borrow().bitxor(other.value.borrow()).is_empty()
}
}
impl NoGood {
/// Creates an [Interpretation] from a given vector of [Terms][Term].
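/// A small sketch of the encoding (positions carrying a truth value become *active*; true positions are additionally recorded in *value*):
/// ```rust
/// use adf_bdd::datatypes::Term;
/// use adf_bdd::nogoods::NoGood;
///
/// // `Term(22)` is no truth value, so position 1 stays undecided
/// let ng = NoGood::from_term_vec(&[Term::TOP, Term(22), Term::BOT]);
/// assert_eq!(ng.len(), 2);
/// ```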
pub fn from_term_vec(term_vec: &[Term]) -> Interpretation {
let mut result = Self::default();
term_vec.iter().enumerate().for_each(|(idx, val)| {
let idx:u32 = idx.try_into().expect("no-good learner implementation is based on the assumption that only u32::MAX-many variables are in place");
if val.is_truth_value() {
result.active.insert(idx);
if val.is_true() {
result.value.insert(idx);
}
}
});
result
}
/// Creates a [NoGood] representing an atomic assignment.
pub fn new_single_nogood(pos: usize, val: bool) -> NoGood {
let mut result = Self::default();
let pos:u32 = pos.try_into().expect("no-good learner implementation is based on the assumption that only u32::MAX-many variables are in place");
result.active.insert(pos);
if val {
result.value.insert(pos);
}
result
}
/// Returns [None] if the iterator contains an inconsistent pair of assignments.
/// Otherwise it returns an [Interpretation] which represents the set values.
pub fn try_from_pair_iter(
pair_iter: &mut impl Iterator<Item = (usize, bool)>,
) -> Option<Interpretation> {
let mut result = Self::default();
let mut visit = false;
for (idx, val) in pair_iter {
visit = true;
let idx:u32 = idx.try_into().expect("no-good learner implementation is based on the assumption that only u32::MAX-many variables are in place");
let is_new = result.active.insert(idx);
let upd = if val {
result.value.insert(idx)
} else {
result.value.remove(idx)
};
// if the state is not new and the value is changed
if !is_new && upd {
return None;
}
}
visit.then_some(result)
}
/// Creates an updated [Vec<Term>], based on the given [&[Term]] and the [NoGood].
/// The parameter _update_ is set to [`true`] if there has been an update and to [`false`] otherwise.
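/// A small sketch of the update semantics:
/// ```rust
/// use adf_bdd::datatypes::Term;
/// use adf_bdd::nogoods::NoGood;
///
/// let ng = NoGood::from_term_vec(&[Term::TOP]);
/// let mut update = false;
/// // position 0 is forced to true; the undecided position 1 is left unchanged
/// assert_eq!(
///     ng.update_term_vec(&[Term(7), Term(9)], &mut update),
///     vec![Term::TOP, Term(9)]
/// );
/// assert!(update);
/// ```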
pub fn update_term_vec(&self, term_vec: &[Term], update: &mut bool) -> Vec<Term> {
*update = false;
term_vec
.iter()
.enumerate()
.map(|(idx, val)| {
let idx: u32 = idx.try_into().expect(
"no-good learner implementation is based on the assumption \
that only u32::MAX-many variables are in place",
);
if self.active.contains(idx) {
if !val.is_truth_value() {
*update = true;
}
if self.value.contains(idx) {
Term::TOP
} else {
Term::BOT
}
} else {
*val
}
})
.collect()
}
/// Given this [NoGood] and another one, conclude a non-conflicting value which can be derived on the basis of the given one.
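/// A small sketch of such a unit-propagation step:
/// ```rust
/// use adf_bdd::datatypes::Term;
/// use adf_bdd::nogoods::NoGood;
///
/// let ng = NoGood::from_term_vec(&[Term::TOP, Term(22), Term::TOP]);
/// let interpr = NoGood::from_term_vec(&[Term::TOP, Term(22), Term(42)]);
/// // to avoid violating `ng`, position 2 must be mapped to false
/// assert_eq!(ng.conclude(&interpr), Some((2, false)));
/// ```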
pub fn conclude(&self, other: &NoGood) -> Option<(usize, bool)> {
log::debug!("conclude: {:?} other {:?}", self, other);
let implication = self
.active
.borrow()
.bitxor(other.active.borrow())
.bitand(self.active.borrow());
let bothactive = self.active.borrow().bitand(other.active.borrow());
let mut no_matches = bothactive.borrow().bitand(other.value.borrow());
no_matches.bitxor_assign(bothactive.bitand(self.value.borrow()));
if implication.len() == 1 && no_matches.is_empty() {
let pos = implication
.min()
.expect("just checked that there is one element to be found");
log::trace!(
"Conclude {:?}",
Some((pos as usize, !self.value.contains(pos)))
);
Some((pos as usize, !self.value.contains(pos)))
} else {
log::trace!("Nothing to Conclude");
None
}
}
/// Updates the [NoGood] with a second one in a disjunctive (bitor) manner.
pub fn disjunction(&mut self, other: &NoGood) {
self.active = self.active.borrow().bitor(&other.active);
self.value = self.value.borrow().bitor(&other.value);
}
/// Returns [true] if the other [Interpretation] matches all the assignments of the current [NoGood].
pub fn is_violating(&self, other: &Interpretation) -> bool {
let active = self.active.borrow().bitand(other.active.borrow());
if self.active.len() == active.len() {
let lhs = active.borrow().bitand(self.value.borrow());
let rhs = active.borrow().bitand(other.value.borrow());
if lhs.bitxor(rhs).is_empty() {
return true;
}
}
false
}
/// Returns the number of set (i.e. active) bits.
pub fn len(&self) -> usize {
self.active
.len()
.try_into()
.expect("expecting to be on a 64 bit system")
}
#[must_use]
/// Returns [true] if the [NoGood] does not set any value.
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl From<&[Term]> for NoGood {
fn from(term_vec: &[Term]) -> Self {
Self::from_term_vec(term_vec)
}
}
/// A structure to store [NoGoods][NoGood] and offer operations and deductions based on them.
#[derive(Debug)]
pub struct NoGoodStore {
store: Vec<Vec<NoGood>>,
duplicates: DuplicateElemination,
}
impl Display for NoGoodStore {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "NoGoodStats: [")?;
for (arity, vec) in self.store.iter().enumerate() {
writeln!(f, "{arity}: {}", vec.len())?;
log::debug!("Nogoods:\n {:?}", vec);
}
write!(f, "]")
}
}
impl NoGoodStore {
/// Creates a new [NoGoodStore] and assumes a size compatible with the underlying [NoGood] implementation.
pub fn new(size: u32) -> NoGoodStore {
Self {
store: vec![Vec::new(); size as usize],
duplicates: DuplicateElemination::Equiv,
}
}
/// Tries to create a new [NoGoodStore].
/// Does not succeed if the size is too big for the underlying [NoGood] implementation.
pub fn try_new(size: usize) -> Option<NoGoodStore> {
Some(Self::new(size.try_into().ok()?))
}
/// Sets the behaviour when managing duplicates.
pub fn set_dup_elem(&mut self, mode: DuplicateElemination) {
self.duplicates = mode;
}
/// Adds a given [NoGood]
pub fn add_ng(&mut self, nogood: NoGood) {
let mut idx = nogood.len();
if idx > 0 {
idx -= 1;
if match self.duplicates {
DuplicateElemination::None => true,
DuplicateElemination::Equiv => !self.store[idx].contains(&nogood),
DuplicateElemination::Subsume => {
self.store
.iter_mut()
.enumerate()
.for_each(|(cur_idx, ng_vec)| {
if idx >= cur_idx {
ng_vec.retain(|ng| !ng.is_violating(&nogood));
}
});
true
}
} {
self.store[idx].push(nogood);
}
}
}
/// Draws a [Conclusion][NoGood], based on the [NoGoodStore] and the given [NoGood].
/// *Returns* [None] if there is a conflict.
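/// A small sketch (assuming a store over three statements; the nogood forbids `false` at position 0, so `true` is concluded there):
/// ```rust
/// use adf_bdd::datatypes::Term;
/// use adf_bdd::nogoods::{NoGood, NoGoodStore};
///
/// let mut store = NoGoodStore::new(3);
/// store.add_ng(NoGood::from_term_vec(&[Term::BOT]));
/// let interpr = [Term(5), Term(6), Term(7)];
/// let concl = store
///     .conclusions(&NoGood::from_term_vec(&interpr))
///     .expect("no conflict on a fully undecided interpretation");
/// let mut update = false;
/// assert_eq!(
///     concl.update_term_vec(&interpr, &mut update),
///     vec![Term::TOP, Term(6), Term(7)]
/// );
/// ```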
pub fn conclusions(&self, nogood: &NoGood) -> Option<NoGood> {
let mut result = nogood.clone();
log::trace!("ng-store: {:?}", self.store);
self.store
.iter()
.enumerate()
.filter(|(len, _vec)| *len <= nogood.len())
.filter_map(|(_len, val)| {
NoGood::try_from_pair_iter(&mut val.iter().filter_map(|ng| ng.conclude(nogood)))
})
.try_fold(&mut result, |acc, ng| {
if ng.is_violating(acc) {
log::trace!("ng conclusion violating");
None
} else {
acc.disjunction(&ng);
Some(acc)
}
})?;
if self
.store
.iter()
.enumerate()
.filter(|(len, _vec)| *len <= nogood.len())
.any(|(_, vec)| {
vec.iter()
.any(|elem| elem.is_violating(&result) || elem.is_violating(nogood))
})
{
return None;
}
Some(result)
}
/// Constructs the closure of the conclusions drawn by the nogoods with respect to the given `interpretation`.
pub(crate) fn conclusion_closure(&self, interpretation: &[Term]) -> ClosureResult {
let mut update = true;
let mut result = match self.conclusions(&interpretation.into()) {
Some(val) => {
log::trace!(
"conclusion-closure step 1: val:{:?} -> {:?}",
val,
val.update_term_vec(interpretation, &mut update)
);
val.update_term_vec(interpretation, &mut update)
}
None => return ClosureResult::Inconsistent,
};
if !update {
return ClosureResult::NoUpdate;
}
while update {
match self.conclusions(&result.as_slice().into()) {
Some(val) => result = val.update_term_vec(&result, &mut update),
None => return ClosureResult::Inconsistent,
}
}
ClosureResult::Update(result)
}
}
/// Defines how thoroughly (and thus how costly) the duplicate elimination is performed.
#[derive(Debug, Copy, Clone)]
pub enum DuplicateElemination {
/// No Duplicate Detection
None,
/// Only check weak equivalence
Equiv,
/// Check for subsumptions
Subsume,
}
/// Represents the result of a conclusion closure, including the cases where no update occurred or an inconsistency was found.
#[derive(Debug, PartialEq, Eq)]
pub(crate) enum ClosureResult {
Update(Vec<Term>),
NoUpdate,
Inconsistent,
}
impl ClosureResult {
/// Allowed to be dead code, as this utility function of the [ClosureResult] enum is currently unused.
#[allow(dead_code)]
pub fn is_update(&self) -> bool {
matches!(self, Self::Update(_))
}
/// Allowed to be dead code, as this utility function of the [ClosureResult] enum is currently unused.
#[allow(dead_code)]
pub fn is_no_update(&self) -> bool {
matches!(self, Self::NoUpdate)
}
/// Allowed to be dead code, as this utility function of the [ClosureResult] enum is currently unused.
#[allow(dead_code)]
pub fn is_inconsistent(&self) -> bool {
matches!(self, Self::Inconsistent)
}
}
impl TryInto<Vec<Term>> for ClosureResult {
type Error = &'static str;
fn try_into(self) -> Result<Vec<Term>, Self::Error> {
match self {
ClosureResult::Update(val) => Ok(val),
ClosureResult::NoUpdate => Err("No update occurred, use the old value instead"),
ClosureResult::Inconsistent => Err("Inconsistency occurred"),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use test_log::test;
#[test]
fn create_ng() {
let terms = vec![Term::TOP, Term(22), Term(13232), Term::BOT, Term::TOP];
let ng = NoGood::from_term_vec(&terms);
assert_eq!(ng.active.len(), 3);
assert_eq!(ng.value.len(), 2);
assert!(ng.active.contains(0));
assert!(!ng.active.contains(1));
assert!(!ng.active.contains(2));
assert!(ng.active.contains(3));
assert!(ng.active.contains(4));
assert!(ng.value.contains(0));
assert!(!ng.value.contains(1));
assert!(!ng.value.contains(2));
assert!(!ng.value.contains(3));
assert!(ng.value.contains(4));
}
#[test]
fn conclude() {
let ng1 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term::TOP, Term::BOT, Term::TOP]);
let ng2 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term(13232), Term::BOT, Term::TOP]);
let ng3 = NoGood::from_term_vec(&[
Term::TOP,
Term(22),
Term(13232),
Term::BOT,
Term::TOP,
Term::BOT,
]);
assert_eq!(ng1.conclude(&ng2), Some((2, false)));
assert_eq!(ng1.conclude(&ng1), None);
assert_eq!(ng2.conclude(&ng1), None);
assert_eq!(ng1.conclude(&ng3), Some((2, false)));
assert_eq!(ng3.conclude(&ng1), Some((5, true)));
assert_eq!(ng3.conclude(&ng2), Some((5, true)));
// conclusions on empty knowledge
let ng4 = NoGood::from_term_vec(&[Term::TOP]);
let ng5 = NoGood::from_term_vec(&[Term::BOT]);
let ng6 = NoGood::from_term_vec(&[]);
assert_eq!(ng4.conclude(&ng6), Some((0, false)));
assert_eq!(ng5.conclude(&ng6), Some((0, true)));
assert_eq!(ng6.conclude(&ng5), None);
assert_eq!(ng4.conclude(&ng5), None);
let ng_a = NoGood::from_term_vec(&[Term::BOT, Term(22)]);
let ng_b = NoGood::from_term_vec(&[Term(22), Term::TOP]);
assert_eq!(ng_a.conclude(&ng_b), Some((0, true)));
}
#[test]
fn violate() {
let ng1 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term::TOP, Term::BOT, Term::TOP]);
let ng2 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term(13232), Term::BOT, Term::TOP]);
let ng3 = NoGood::from_term_vec(&[
Term::TOP,
Term(22),
Term(13232),
Term::BOT,
Term::TOP,
Term::BOT,
]);
let ng4 = NoGood::from_term_vec(&[Term::TOP]);
assert!(ng4.is_violating(&ng1));
assert!(!ng1.is_violating(&ng4));
assert!(ng2.is_violating(&ng3));
assert!(!ng3.is_violating(&ng2));
assert_eq!(ng4, NoGood::new_single_nogood(0, true));
}
#[test]
fn add_ng() {
let mut ngs = NoGoodStore::new(5);
let ng1 = NoGood::from_term_vec(&[Term::TOP]);
let ng2 = NoGood::from_term_vec(&[Term(22), Term::TOP]);
let ng3 = NoGood::from_term_vec(&[Term(22), Term(22), Term::TOP]);
let ng4 = NoGood::from_term_vec(&[Term(22), Term(22), Term(22), Term::TOP]);
let ng5 = NoGood::from_term_vec(&[Term::BOT]);
assert!(!ng1.is_violating(&ng5));
assert!(ng1.is_violating(&ng1));
ngs.add_ng(ng1.clone());
ngs.add_ng(ng2.clone());
ngs.add_ng(ng3.clone());
ngs.add_ng(ng4.clone());
ngs.add_ng(ng5.clone());
assert_eq!(
ngs.store
.iter()
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
5
);
ngs.set_dup_elem(DuplicateElemination::Equiv);
ngs.add_ng(ng1.clone());
ngs.add_ng(ng2.clone());
ngs.add_ng(ng3.clone());
ngs.add_ng(ng4.clone());
ngs.add_ng(ng5.clone());
assert_eq!(
ngs.store
.iter()
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
5
);
ngs.set_dup_elem(DuplicateElemination::Subsume);
ngs.add_ng(ng1);
ngs.add_ng(ng2);
ngs.add_ng(ng3);
ngs.add_ng(ng4);
ngs.add_ng(ng5);
assert_eq!(
ngs.store
.iter()
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
5
);
ngs.add_ng(NoGood::from_term_vec(&[Term(22), Term::BOT, Term(22)]));
assert_eq!(
ngs.store
.iter()
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
6
);
ngs.add_ng(NoGood::from_term_vec(&[Term(22), Term::BOT, Term::BOT]));
assert_eq!(
ngs.store
.iter()
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
6
);
assert!(NoGood::from_term_vec(&[Term(22), Term::BOT, Term(22)])
.is_violating(&NoGood::from_term_vec(&[Term(22), Term::BOT, Term::BOT])));
}
#[test]
fn ng_store_conclusions() {
let mut ngs = NoGoodStore::new(5);
let ng1 = NoGood::from_term_vec(&[Term::BOT]);
ngs.add_ng(ng1.clone());
assert_eq!(ng1.conclude(&ng1), None);
assert_eq!(
ng1.conclude(&NoGood::from_term_vec(&[Term(33)])),
Some((0, true))
);
assert_eq!(ngs.conclusions(&ng1), None);
assert_ne!(ngs.conclusions(&NoGood::from_term_vec(&[Term(33)])), None);
assert_eq!(
ngs.conclusions(&NoGood::from_term_vec(&[Term(33)]))
.expect("just checked with prev assertion")
.update_term_vec(&[Term(33)], &mut false),
vec![Term::TOP]
);
let ng2 = NoGood::from_term_vec(&[Term(123), Term::TOP, Term(234), Term(345)]);
let ng3 = NoGood::from_term_vec(&[Term::TOP, Term::BOT, Term::TOP, Term(345)]);
ngs.add_ng(ng2);
ngs.add_ng(ng3);
log::debug!("issues start here");
assert!(ngs
.conclusions(&NoGood::from_term_vec(&[Term::TOP]))
.is_some());
assert_eq!(
ngs.conclusions(&[Term::TOP].as_slice().into())
.expect("just checked with prev assertion")
.update_term_vec(&[Term::TOP, Term(4), Term(5), Term(6), Term(7)], &mut false),
vec![Term::TOP, Term::BOT, Term(5), Term(6), Term(7)]
);
assert!(ngs
.conclusions(&NoGood::from_term_vec(&[
Term::TOP,
Term::BOT,
Term(5),
Term(6),
Term(7)
]))
.is_some());
ngs = NoGoodStore::new(10);
ngs.add_ng([Term::BOT].as_slice().into());
ngs.add_ng(
[Term::TOP, Term::BOT, Term(33), Term::TOP]
.as_slice()
.into(),
);
ngs.add_ng(
[Term::TOP, Term::BOT, Term(33), Term(33), Term::BOT]
.as_slice()
.into(),
);
ngs.add_ng([Term::TOP, Term::TOP].as_slice().into());
let interpr: Vec<Term> = vec![
Term(123),
Term(233),
Term(345),
Term(456),
Term(567),
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000),
];
let concl = ngs.conclusions(&interpr.as_slice().into());
assert_eq!(concl, Some(NoGood::from_term_vec(&[Term::TOP])));
let mut update = false;
let new_interpr = concl
.expect("just tested in assert")
.update_term_vec(&interpr, &mut update);
assert_eq!(
new_interpr,
vec![
Term::TOP,
Term(233),
Term(345),
Term(456),
Term(567),
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000)
]
);
assert!(update);
let new_int_2 = ngs
.conclusions(&new_interpr.as_slice().into())
.map(|val| val.update_term_vec(&new_interpr, &mut update))
.expect("Should return a value");
assert_eq!(
new_int_2,
vec![
Term::TOP,
Term::BOT,
Term(345),
Term(456),
Term(567),
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000)
]
);
assert!(update);
let new_int_3 = ngs
.conclusions(&new_int_2.as_slice().into())
.map(|val| val.update_term_vec(&new_int_2, &mut update))
.expect("Should return a value");
assert_eq!(
new_int_3,
vec![
Term::TOP,
Term::BOT,
Term(345),
Term::BOT,
Term::TOP,
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000)
]
);
assert!(update);
let concl4 = ngs.conclusions(&new_int_3.as_slice().into());
assert_ne!(concl4, None);
let new_int_4 = ngs
.conclusions(&new_int_3.as_slice().into())
.map(|val| val.update_term_vec(&new_int_3, &mut update))
.expect("Should return a value");
assert_eq!(
new_int_4,
vec![
Term::TOP,
Term::BOT,
Term(345),
Term::BOT,
Term::TOP,
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000)
]
);
assert!(!update);
// inconsistence
let interpr = vec![
Term::TOP,
Term::TOP,
Term::BOT,
Term::BOT,
Term(111),
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000),
];
assert_eq!(ngs.conclusions(&interpr.as_slice().into()), None);
ngs = NoGoodStore::new(6);
ngs.add_ng(
[Term(1), Term(1), Term(1), Term(0), Term(0), Term(1)]
.as_slice()
.into(),
);
ngs.add_ng(
[Term(1), Term(1), Term(8), Term(0), Term(0), Term(11)]
.as_slice()
.into(),
);
ngs.add_ng([Term(22), Term(1)].as_slice().into());
assert_eq!(
ngs.conclusions(
&[Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)]
.as_slice()
.into(),
),
Some(NoGood::from_term_vec(&[
Term(1),
Term(0),
Term(3),
Term(9),
Term(0),
Term(1)
]))
);
}
#[test]
fn conclusion_closure() {
let mut ngs = NoGoodStore::new(10);
ngs.add_ng([Term::BOT].as_slice().into());
ngs.add_ng(
[Term::TOP, Term::BOT, Term(33), Term::TOP]
.as_slice()
.into(),
);
ngs.add_ng(
[Term::TOP, Term::BOT, Term(33), Term(33), Term::BOT]
.as_slice()
.into(),
);
ngs.add_ng([Term::TOP, Term::TOP].as_slice().into());
let interpr: Vec<Term> = vec![
Term(123),
Term(233),
Term(345),
Term(456),
Term(567),
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000),
];
let result = ngs.conclusion_closure(&interpr);
assert!(result.is_update());
let resultint: Vec<Term> = result.try_into().expect("just checked conversion");
assert_eq!(
resultint,
vec![
Term::TOP,
Term::BOT,
Term(345),
Term::BOT,
Term::TOP,
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000)
]
);
let result_no_upd = ngs.conclusion_closure(&resultint);
assert!(result_no_upd.is_no_update());
assert_eq!(
<ClosureResult as TryInto<Vec<Term>>>::try_into(result_no_upd)
.expect_err("just checked that it is an error"),
"No update occurred, use the old value instead"
);
let inconsistent_interpr = vec![
Term::TOP,
Term::TOP,
Term::BOT,
Term::BOT,
Term(111),
Term(678),
Term(789),
Term(899),
Term(999),
Term(1000),
];
let result_inconsistent = ngs.conclusion_closure(&inconsistent_interpr);
assert!(result_inconsistent.is_inconsistent());
assert_eq!(
<ClosureResult as TryInto<Vec<Term>>>::try_into(result_inconsistent)
.expect_err("just checked that it is an error"),
"Inconsistency occurred"
);
ngs = NoGoodStore::new(6);
ngs.add_ng(
[Term(1), Term(1), Term(1), Term(0), Term(0), Term(1)]
.as_slice()
.into(),
);
ngs.add_ng(
[Term(1), Term(1), Term(8), Term(0), Term(0), Term(11)]
.as_slice()
.into(),
);
ngs.add_ng([Term(22), Term(1)].as_slice().into());
assert_eq!(
ngs.conclusion_closure(&[Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)]),
ClosureResult::Update(vec![Term(1), Term(0), Term(3), Term(9), Term(0), Term(1)])
);
}
}

View File

@ -277,11 +277,14 @@ impl Bdd {
hi_paths.models,
hidepth
);
#[cfg(feature = "adhoccountmodels")]
let (lo_exp, hi_exp) = if lodepth > hidepth {
(1, 2usize.pow((lodepth - hidepth) as u32))
} else {
(2usize.pow((hidepth - lodepth) as u32), 1)
};
#[cfg(not(feature = "adhoccountmodels"))]
let (lo_exp, hi_exp) = (0, 0);
log::debug!("lo_exp {}, hi_exp {}", lo_exp, hi_exp);
count_cache.insert(
new_term,
@ -310,11 +313,11 @@ impl Bdd {
///
Use the flag `_memoization` to choose between using the memoization approach or not. (This flag does nothing if the feature `adhoccountmodels` is used.)
pub fn models(&self, term: Term, _memoization: bool) -> ModelCounts {
#[cfg(feature = "adhoccounting")]
#[cfg(feature = "adhoccountmodels")]
{
return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").0;
}
#[cfg(not(feature = "adhoccounting"))]
#[cfg(not(feature = "adhoccountmodels"))]
if _memoization {
self.modelcount_memoization(term).0
} else {
@ -642,6 +645,7 @@ mod test {
let formula3 = bdd.xor(v1, v2);
let formula4 = bdd.and(v3, formula2);
#[cfg(feature = "adhoccountmodels")]
assert_eq!(bdd.models(v1, false), (1, 1).into());
let mut x = bdd.count_cache.get_mut().iter().collect::<Vec<_>>();
x.sort();
@ -650,20 +654,23 @@ mod test {
log::debug!("{:?}", x);
}
log::debug!("{:?}", x);
assert_eq!(bdd.models(formula1, false), (3, 1).into());
assert_eq!(bdd.models(formula2, false), (1, 3).into());
assert_eq!(bdd.models(formula3, false), (2, 2).into());
assert_eq!(bdd.models(formula4, false), (5, 3).into());
assert_eq!(bdd.models(Term::TOP, false), (0, 1).into());
assert_eq!(bdd.models(Term::BOT, false), (1, 0).into());
#[cfg(feature = "adhoccountmodels")]
{
assert_eq!(bdd.models(formula1, false), (3, 1).into());
assert_eq!(bdd.models(formula2, false), (1, 3).into());
assert_eq!(bdd.models(formula3, false), (2, 2).into());
assert_eq!(bdd.models(formula4, false), (5, 3).into());
assert_eq!(bdd.models(Term::TOP, false), (0, 1).into());
assert_eq!(bdd.models(Term::BOT, false), (1, 0).into());
assert_eq!(bdd.models(v1, true), (1, 1).into());
assert_eq!(bdd.models(formula1, true), (3, 1).into());
assert_eq!(bdd.models(formula2, true), (1, 3).into());
assert_eq!(bdd.models(formula3, true), (2, 2).into());
assert_eq!(bdd.models(formula4, true), (5, 3).into());
assert_eq!(bdd.models(Term::TOP, true), (0, 1).into());
assert_eq!(bdd.models(Term::BOT, true), (1, 0).into());
assert_eq!(bdd.models(v1, true), (1, 1).into());
assert_eq!(bdd.models(formula1, true), (3, 1).into());
assert_eq!(bdd.models(formula2, true), (1, 3).into());
assert_eq!(bdd.models(formula3, true), (2, 2).into());
assert_eq!(bdd.models(formula4, true), (5, 3).into());
assert_eq!(bdd.models(Term::TOP, true), (0, 1).into());
assert_eq!(bdd.models(Term::BOT, true), (1, 0).into());
}
assert_eq!(bdd.paths(formula1, false), (2, 1).into());
assert_eq!(bdd.paths(formula2, false), (1, 2).into());
@ -706,32 +713,35 @@ mod test {
((1, 0).into(), (1, 0).into(), 0)
);
assert_eq!(
bdd.modelcount_naive(formula4),
bdd.modelcount_memoization(formula4)
);
#[cfg(feature = "adhoccountmodels")]
{
assert_eq!(
bdd.modelcount_naive(formula4),
bdd.modelcount_memoization(formula4)
);
assert_eq!(bdd.modelcount_naive(v1), bdd.modelcount_memoization(v1));
assert_eq!(
bdd.modelcount_naive(formula1),
bdd.modelcount_memoization(formula1)
);
assert_eq!(
bdd.modelcount_naive(formula2),
bdd.modelcount_memoization(formula2)
);
assert_eq!(
bdd.modelcount_naive(formula3),
bdd.modelcount_memoization(formula3)
);
assert_eq!(
bdd.modelcount_naive(Term::TOP),
bdd.modelcount_memoization(Term::TOP)
);
assert_eq!(
bdd.modelcount_naive(Term::BOT),
bdd.modelcount_memoization(Term::BOT)
);
assert_eq!(bdd.modelcount_naive(v1), bdd.modelcount_memoization(v1));
assert_eq!(
bdd.modelcount_naive(formula1),
bdd.modelcount_memoization(formula1)
);
assert_eq!(
bdd.modelcount_naive(formula2),
bdd.modelcount_memoization(formula2)
);
assert_eq!(
bdd.modelcount_naive(formula3),
bdd.modelcount_memoization(formula3)
);
assert_eq!(
bdd.modelcount_naive(Term::TOP),
bdd.modelcount_memoization(Term::TOP)
);
assert_eq!(
bdd.modelcount_naive(Term::BOT),
bdd.modelcount_memoization(Term::BOT)
);
}
assert_eq!(bdd.max_depth(Term::BOT), 0);
assert_eq!(bdd.max_depth(v1), 1);

View File

@ -10,7 +10,13 @@ use nom::{
sequence::{delimited, preceded, separated_pair, terminated},
IResult,
};
use std::{cell::RefCell, collections::HashMap, rc::Rc};
use std::collections::HashMap;
use std::{
cell::RefCell,
sync::{Arc, RwLock},
};
use crate::datatypes::adf::VarContainer;
/// A representation of a formula, still using the strings from the input.
#[derive(Clone, PartialEq, Eq)]
@ -127,8 +133,8 @@ impl std::fmt::Debug for Formula<'_> {
/// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead.
#[derive(Debug)]
pub struct AdfParser<'a> {
namelist: Rc<RefCell<Vec<String>>>,
dict: Rc<RefCell<HashMap<String, usize>>>,
namelist: Arc<RwLock<Vec<String>>>,
dict: Arc<RwLock<HashMap<String, usize>>>,
formulae: RefCell<Vec<Formula<'a>>>,
formulaname: RefCell<Vec<String>>,
}
@ -136,8 +142,8 @@ pub struct AdfParser<'a> {
impl Default for AdfParser<'_> {
fn default() -> Self {
AdfParser {
namelist: Rc::new(RefCell::new(Vec::new())),
dict: Rc::new(RefCell::new(HashMap::new())),
namelist: Arc::new(RwLock::new(Vec::new())),
dict: Arc::new(RwLock::new(HashMap::new())),
formulae: RefCell::new(Vec::new()),
formulaname: RefCell::new(Vec::new()),
}
@ -176,8 +182,14 @@ where
fn parse_statement(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> {
|input| {
let mut dict = self.dict.borrow_mut();
let mut namelist = self.namelist.borrow_mut();
let mut dict = self
.dict
.write()
.expect("RwLock of dict could not get write access");
let mut namelist = self
.namelist
.write()
.expect("RwLock of namelist could not get write access");
let (remain, statement) =
terminated(AdfParser::statement, terminated(tag("."), multispace0))(input)?;
if !dict.contains_key(statement) {
@ -200,16 +212,31 @@ where
}
}
impl<'a> AdfParser<'a> {
/// Creates a new parser, utilising the already existing [VarContainer]
pub fn with_var_container(var_container: VarContainer) -> AdfParser<'a> {
AdfParser {
namelist: var_container.names(),
dict: var_container.mappings(),
formulae: RefCell::new(Vec::new()),
formulaname: RefCell::new(Vec::new()),
}
}
}
impl AdfParser<'_> {
/// After an update to the namelist, all indices are regenerated.
fn regenerate_indizes(&self) {
self.namelist
.as_ref()
.borrow()
.read()
.expect("ReadLock on namelist failed")
.iter()
.enumerate()
.for_each(|(i, elem)| {
self.dict.as_ref().borrow_mut().insert(elem.clone(), i);
self.dict
.write()
.expect("WriteLock on dict failed")
.insert(elem.clone(), i);
});
}
@ -217,7 +244,10 @@ impl AdfParser<'_> {
/// Results, which got used before might become corrupted.
/// Ensure that all used data is physically copied.
pub fn varsort_lexi(&self) -> &Self {
self.namelist.as_ref().borrow_mut().sort_unstable();
self.namelist
.write()
.expect("WriteLock on namelist failed")
.sort_unstable();
self.regenerate_indizes();
self
}
@ -227,8 +257,8 @@ impl AdfParser<'_> {
/// Ensure that all used data is physically copied.
pub fn varsort_alphanum(&self) -> &Self {
self.namelist
.as_ref()
.borrow_mut()
.write()
.expect("WriteLock on namelist failed")
.string_sort_unstable(natural_lexical_cmp);
self.regenerate_indizes();
self
@ -343,14 +373,18 @@ impl AdfParser<'_> {
/// Returns the number of parsed statements.
pub fn dict_size(&self) -> usize {
//self.dict.borrow().len()
self.dict.as_ref().borrow().len()
self.dict.read().expect("ReadLock on dict failed").len()
}
/// Returns the number-representation and position of a given statement in string-representation.
///
/// Will return [None] if the string does not occur in the dictionary.
pub fn dict_value(&self, value: &str) -> Option<usize> {
self.dict.as_ref().borrow().get(value).copied()
self.dict
.read()
.expect("ReadLock on dict failed")
.get(value)
.copied()
}
/// Returns the acceptance condition of a statement at the given position.
@ -360,12 +394,17 @@ impl AdfParser<'_> {
self.formulae.borrow().get(idx).cloned()
}
pub(crate) fn dict_rc_refcell(&self) -> Rc<RefCell<HashMap<String, usize>>> {
Rc::clone(&self.dict)
pub(crate) fn dict(&self) -> Arc<RwLock<HashMap<String, usize>>> {
Arc::clone(&self.dict)
}
pub(crate) fn namelist_rc_refcell(&self) -> Rc<RefCell<Vec<String>>> {
Rc::clone(&self.namelist)
pub(crate) fn namelist(&self) -> Arc<RwLock<Vec<String>>> {
Arc::clone(&self.namelist)
}
/// Returns a [`VarContainer`][crate::datatypes::adf::VarContainer] which allows access to the variable information gathered by the parser.
pub fn var_container(&self) -> VarContainer {
VarContainer::from_parser(self.namelist(), self.dict())
}
pub(crate) fn formula_count(&self) -> usize {
@ -379,8 +418,8 @@ impl AdfParser<'_> {
.map(|name| {
*self
.dict
.as_ref()
.borrow()
.read()
.expect("ReadLock on dict failed")
.get(name)
.expect("Dictionary should contain all the used formulanames")
})