Compare commits
74 Commits
Commit SHA1s, in the order listed:

ff12d4fede, c9631346ab, ca6c94f2e5, 54e57e3820, f601d473cc, 530fb5c4e6, a85b0cb129, 627a1a1810,
fc0042fcd1, bc403d10cb, 34d3af0c0d, 8cafca22eb, 3979f77d03, eeef4729b6, b68c0b3d3f, 6c0da967b9,
716d1194a6, 3445ae343f, 26e978ca47, dd8524e02e, c9278cf5ce, 73986437f8, 24c6788c9f, 11083098a2,
68acf25a39, 68cbab5b02, 6d687a3839, d807089884, 683c12a525, 17a57938c7, bc21ecabc7, 495f9c893f,
ebbf8de684, 7000c41a61, 8269fbac9d, dccf11479f, fdf530f6e6, 0216a10895, a0139c66e2, d8492dc6d0,
c4a57bcb84, fea31f1590, 1596dc6818, d7e71e5da7, 45700d7224, d4e31ee8c2, 62fa2e8c94, 493a834388,
b7902ef940, bdbebc137d, c29db9a6aa, 78949b18e5, e0eea73525, 98753d696b, 6d09e71ba6, c951e78c86,
6e5df674a5, 1c8508eb66, b8518ee340, 9f4689ad45, 43da337ce9, 35bb36bfc5, 432ac0348a, 75a85203b0,
762c06af04, 098baf9bdd, 6c116509c6, 7e66d89d03, 02dc37cbbf, f021517a15, 76c06ab03d, 0554538a88,
d0026b9491, 0d4f9ff195
`.dockerignore` (new file, 5 lines)

```
frontend/node_modules
frontend/.parcel-cache
server/mongodb-data
target/debug
```
`.github/CONTRIBUTING.md` (1 line added)

```diff
@@ -65,6 +65,7 @@ Start reading our code and you'll get the hang of it.
 
 * We use `rustfmt` as code-convention. (you can use whatever styles you like, just let `rustfmt` format the code before you commit)
 * We try to reduce redundancies in enumeration-variant names.
+* We try to use the `where` clause over embedded clauses for better readability.
 * We follow the code-conventions and naming-conventions of the current Rust version.
 * We write `clippy`-conform code, so follow `clippy` suggestions where applicable. If you write a compiler-exception (i.e. `#[allow(...)]`) describe your decision to do so in a meaningful comment. We advise to mark this code-segment in the pull-request as a code-comment too.
 * `rustdoc` is obligatory for crate-exposed structures (e.g. `enum`, `struct`, `fn`, ...).
```
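For illustration only, a minimal sketch of two of the conventions above (a `where` clause instead of embedded bounds, and a justified `#[allow(...)]`); the function is hypothetical, not code from this repository:

```rust
use std::fmt::Display;

// Convention: prefer a `where` clause over embedded trait bounds.
fn print_all<T>(items: &[T])
where
    T: Display,
{
    // Compiler exception with a meaningful comment, as the guidelines ask:
    // the loop is deliberately index-based here (e.g. to mirror a reference
    // implementation), so the clippy suggestion is suppressed on purpose.
    #[allow(clippy::needless_range_loop)]
    for i in 0..items.len() {
        println!("{}", items[i]);
    }
}

fn main() {
    print_all(&["grounded", "complete", "stable"]);
}
```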
`.github/workflows/combine-prs.yml` (new file, 139 lines)

```yaml
name: 'Combine PRs'

# Controls when the action will run - in this case triggered manually
on:
  workflow_dispatch:
    inputs:
      branchPrefix:
        description: 'Branch prefix to find combinable PRs based on'
        required: true
        default: 'dependabot'
      mustBeGreen:
        description: 'Only combine PRs that are green (status is success)'
        required: true
        default: true
      combineBranchName:
        description: 'Name of the branch to combine PRs into'
        required: true
        default: 'combine-prs-branch'
      ignoreLabel:
        description: 'Exclude PRs with this label'
        required: true
        default: 'nocombine'

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "combine-prs"
  combine-prs:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      - uses: actions/github-script@v3
        id: fetch-branch-names
        name: Fetch branch names
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const pulls = await github.paginate('GET /repos/:owner/:repo/pulls', {
              owner: context.repo.owner,
              repo: context.repo.repo
            });
            branches = [];
            prs = [];
            base_branch = null;
            for (const pull of pulls) {
              const branch = pull['head']['ref'];
              console.log('Pull for branch: ' + branch);
              if (branch.startsWith('${{ github.event.inputs.branchPrefix }}')) {
                console.log('Branch matched: ' + branch);
                statusOK = true;
                if(${{ github.event.inputs.mustBeGreen }}) {
                  console.log('Checking green status: ' + branch);
                  const statuses = await github.paginate('GET /repos/{owner}/{repo}/commits/{ref}/status', {
                    owner: context.repo.owner,
                    repo: context.repo.repo,
                    ref: branch
                  });
                  if(statuses.length > 0) {
                    const latest_status = statuses[0]['state'];
                    console.log('Validating status: ' + latest_status);
                    if(latest_status != 'success') {
                      console.log('Discarding ' + branch + ' with status ' + latest_status);
                      statusOK = false;
                    }
                  }
                }
                console.log('Checking labels: ' + branch);
                const labels = pull['labels'];
                for(const label of labels) {
                  const labelName = label['name'];
                  console.log('Checking label: ' + labelName);
                  if(labelName == '${{ github.event.inputs.ignoreLabel }}') {
                    console.log('Discarding ' + branch + ' with label ' + labelName);
                    statusOK = false;
                  }
                }
                if (statusOK) {
                  console.log('Adding branch to array: ' + branch);
                  branches.push(branch);
                  prs.push('#' + pull['number'] + ' ' + pull['title']);
                  base_branch = pull['base']['ref'];
                }
              }
            }

            if (branches.length == 0) {
              core.setFailed('No PRs/branches matched criteria');
              return;
            }
            core.setOutput('base-branch', base_branch);
            core.setOutput('prs-string', prs.join('\n'));

            combined = branches.join(' ')
            console.log('Combined: ' + combined);
            return combined
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2.3.3
        with:
          fetch-depth: 0
      # Creates a branch with other PR branches merged together
      - name: Created combined branch
        env:
          BASE_BRANCH: ${{ steps.fetch-branch-names.outputs.base-branch }}
          BRANCHES_TO_COMBINE: ${{ steps.fetch-branch-names.outputs.result }}
          COMBINE_BRANCH_NAME: ${{ github.event.inputs.combineBranchName }}
        run: |
          echo "$BRANCHES_TO_COMBINE"
          sourcebranches="${BRANCHES_TO_COMBINE%\"}"
          sourcebranches="${sourcebranches#\"}"

          basebranch="${BASE_BRANCH%\"}"
          basebranch="${basebranch#\"}"

          git config pull.rebase false
          git config user.name github-actions
          git config user.email github-actions@github.com

          git branch $COMBINE_BRANCH_NAME $basebranch
          git checkout $COMBINE_BRANCH_NAME
          git pull origin $sourcebranches --no-edit
          git push origin $COMBINE_BRANCH_NAME
      # Creates a PR with the new combined branch
      - uses: actions/github-script@v3
        name: Create Combined Pull Request
        env:
          PRS_STRING: ${{ steps.fetch-branch-names.outputs.prs-string }}
        with:
          github-token: ${{secrets.GITHUB_TOKEN}}
          script: |
            const prString = process.env.PRS_STRING;
            const body = 'This PR was created by the Combine PRs action by combining the following PRs:\n' + prString;
            await github.pulls.create({
              owner: context.repo.owner,
              repo: context.repo.repo,
              title: 'Combined PR',
              head: '${{ github.event.inputs.combineBranchName }}',
              base: '${{ steps.fetch-branch-names.outputs.base-branch }}',
              body: body
            });
```
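Since this workflow only runs on `workflow_dispatch`, it has to be started manually, either from the Actions tab or, as a sketch, with the GitHub CLI (the input values shown are just the defaults defined above):

```sh
gh workflow run combine-prs.yml \
  -f branchPrefix=dependabot \
  -f mustBeGreen=true \
  -f combineBranchName=combine-prs-branch \
  -f ignoreLabel=nocombine
```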
`.github/workflows/devskim.yml` (14 lines changed)

```diff
@@ -7,28 +7,28 @@ name: DevSkim
 on:
   push:
-    branches: [ main ]
+    branches: [ "main" ]
   pull_request:
-    branches: [ main ]
+    branches: [ "main" ]
   schedule:
-    - cron: '26 6 * * 5'
+    - cron: '15 6 * * 4'
 
 jobs:
   lint:
     name: DevSkim
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     permissions:
       actions: read
       contents: read
       security-events: write
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       - name: Run DevSkim scanner
         uses: microsoft/DevSkim-Action@v1
 
       - name: Upload DevSkim scan results to GitHub Security tab
-        uses: github/codeql-action/upload-sarif@v1
+        uses: github/codeql-action/upload-sarif@v3
         with:
           sarif_file: devskim-results.sarif
```
`.github/workflows/pr.yml` (3 lines added)

```diff
@@ -4,6 +4,7 @@ on:
   pull_request:
     branches:
       - main
+      - develop
 
 env:
   RUST_BACKTRACE: 1
@@ -28,6 +29,7 @@ jobs:
     - run: cargo test --verbose --workspace
     - run: cargo test --verbose --workspace --all-features
     - run: cargo test --verbose --workspace --no-default-features
+    - run: cargo test --verbose --workspace --no-default-features -F benchmark
 
   clippy:
     name: Lint with clippy
@@ -42,6 +44,7 @@ jobs:
     - run: cargo clippy --workspace --all-targets --verbose
     - run: cargo clippy --workspace --all-targets --verbose --no-default-features
     - run: cargo clippy --workspace --all-targets --verbose --all-features
+    - run: cargo clippy --workspace --all-targets --verbose --no-default-features -F benchmark
 
   rustfmt:
     name: Verify code formatting
```
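For reference, `-F` is cargo's short flag for `--features`, so the added test step is equivalent to:

```sh
cargo test --verbose --workspace --no-default-features --features benchmark
```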
`.gitignore` (6 lines added)

```diff
@@ -21,3 +21,9 @@ tramp
 *_flymake*
 
 /tests/out/
+
+# Ignore direnv data
+/.direnv/
+
+# ignore perfdata
+perf.data
```
`CITATION.cff` (new file, 62 lines)

```yaml
# This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!

cff-version: 1.2.0
title: >-
  Abstract Dialectical Frameworks solved by Binary
  Decision Diagrams; developed in Dresden (ADF-BDD)
message: >-
  If you use this software, please cite it using the
  metadata from this file. Note that related conference papers are accepted and will appear soon.
type: software
authors:
  - given-names: Stefan
    family-names: Ellmauthaler
    email: stefan.ellmauthaler@tu-dresden.de
    affiliation: 'KBS, TU Dresden'
    orcid: 'https://orcid.org/0000-0003-3882-4286'
repository-code: 'https://github.com/ellmau/adf-obdd'
url: 'https://ellmau.github.io/adf-obdd/'
abstract: >-
  Solver for ADFs grounded, complete, and stable
  semantics by utilising OBDDs - ordered binary
  decision diagrams.
keywords:
  - binary decision diagrams
  - argumentation frameworks
  - argumentation tools
license: MIT
commit: 35bb36bfc5ee47b2ad864ead48907fdca5fc5ec4
version: v0.2.4-beta.1
date-released: '2022-04-22'
preferred-citation:
  authors:
    - given-names: Stefan
      family-names: Ellmauthaler
      email: stefan.ellmauthaler@tu-dresden.de
      affiliation: 'KBS, TU Dresden'
      orcid: 'https://orcid.org/0000-0003-3882-4286'
    - given-names: Sarah Allice
      family-names: Gaggl
      email: sarah.gaggl@tu-dresden.de
      affiliation: 'TU Dresden'
      orcid: 'https://orcid.org/0000-0003-2425-6089'
    - given-names: Dominik
      family-names: Rusovac
      email: dominik.rusovac@tu-dresden.de
      affiliation: 'TU Dresden'
      orcid: 'https://orcid.org/0000-0002-3172-5827'
    - given-names: Johannes Peter
      family-names: Wallner
      email: wallner@ist.tugraz.at
      affiliation: 'TU Graz'
      orcid: 'https://orcid.org/0000-0002-3051-1966'
  title: "ADF-BDD: An ADF Solver Based on Binary Decision Diagrams"
  type: conference
  conference:
    name: 9th International Conference on Computational Models of Argument
    location: Cardiff
    alias: COMMA
    website: 'https://comma22.cs.cf.ac.uk/'
  year: 2022
```
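Tools that understand the Citation File Format can turn this metadata into other citation styles. As one example, assuming the third-party `cffconvert` tool is installed, the file can be rendered as BibTeX from the repository root:

```sh
pip install cffconvert
cffconvert --infile CITATION.cff --format bibtex
```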
`Cargo.lock` (generated, 3508 lines changed; the generated diff is collapsed)

The workspace manifest (the root `Cargo.toml`, since `[workspace]` tables live only there) gains the new `server` member and a tuned release profile:

```diff
@@ -1,3 +1,7 @@
 [workspace]
-members=[ "lib", "bin" ]
+members=[ "lib", "bin", "server" ]
 default-members = [ "lib" ]
+
+[profile.release]
+lto = "fat"
+codegen-units = 1
```
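The new `[profile.release]` settings trade longer compile times for a better-optimized binary (whole-program "fat" LTO, a single codegen unit); any release build of the workspace picks them up automatically:

```sh
cargo build --workspace --release
```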
`Dockerfile` (new file, 36 lines)

```dockerfile
# 1. BUILD-CONTAINER: Frontend
FROM node:hydrogen-alpine

WORKDIR /root

COPY ./frontend /root

RUN yarn && yarn build

# 2. BUILD-CONTAINER: Server
FROM rust:alpine

WORKDIR /root

RUN apk add --no-cache musl-dev

COPY ./bin /root/bin
COPY ./lib /root/lib
COPY ./server /root/server
COPY ./Cargo.toml /root/Cargo.toml
COPY ./Cargo.lock /root/Cargo.lock

RUN cargo build --workspace --release

# 3. RUNTIME-CONTAINER: run server with frontend as assets
FROM alpine:latest

WORKDIR /root

COPY --from=0 /root/dist /root/assets
COPY --from=1 /root/target/release/adf-bdd-server /root/server

EXPOSE 8080

ENTRYPOINT ["./server"]
```
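A sketch of building and running the resulting image locally; the tag `adf-bdd-server` and the host port are example choices, not taken from the repository:

```sh
docker build -t adf-bdd-server .
docker run --rm -p 8080:8080 adf-bdd-server
```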
`LICENSE` (695 lines changed): the GNU GPL v3 text is replaced by the MIT License.

```diff
@@ -1,674 +1,21 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
-Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
-Everyone is permitted to copy and distribute verbatim copies
-of this license document, but changing it is not allowed.
-
-[... the remainder of the standard GPL-3.0 text, 674 lines in total, removed ...]
+MIT License
+
+Copyright (c) 2022 Stefan Ellmauthaler
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
```
114
README.md
@@ -1,6 +1,26 @@
- [](https://coveralls.io/github/ellmau/adf-obdd)    [](https://github.com/ellmau/adf-obdd/releases) [](https://github.com/ellmau/adf-obdd/discussions) 
+[](https://crates.io/crates/adf-bdd-bin)
+[](https://crates.io/crates/adf_bdd)
+[](https://docs.rs/adf_bdd/latest/adf_bdd/)
+
+[](https://coveralls.io/github/ellmau/adf-obdd)
+
+ 
+[](https://github.com/ellmau/adf-obdd/releases)
+
+[](https://github.com/ellmau/adf-obdd/discussions) 
 
-# Solver for ADFs grounded semantics by utilising OBDDs - ordered binary decision diagrams
+# Abstract Dialectical Frameworks solved by (ordered) Binary Decision Diagrams; developed in Dresden (ADF-oBDD project)
 
+This project is currently split into three parts:
+- a [binary (adf-bdd)](bin), which allows one to easily answer semantics questions on abstract dialectical frameworks
+- a [library (adf_bdd)](lib), which contains all the necessary algorithms and an open API which compute the answers to the semantics questions
+- a [server](server) and a [frontend](frontend) to access the solver as a web-service available at https://adf-bdd.dev
 
+Latest documentation of the API can be found [here](https://docs.rs/adf_bdd/latest/adf_bdd/).
+The current version of the binary can be downloaded [here](https://github.com/ellmau/adf-obdd/releases).
+
+
+Do not hesitate to report bugs or ask about features in the [issues-section](https://github.com/ellmau/adf-obdd/issues) or have a conversation about anything of the project in the [discussion space](https://github.com/ellmau/adf-obdd/discussions)
+
+
 ## Abstract Dialectical Frameworks
@@ -8,55 +28,6 @@ An abstract dialectical framework (ADF) consists of abstract statements. Each st
 ## Ordered Binary Decision Diagram
 An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheap.
 
-## Usage of the binary
-```
-USAGE:
-    adf_bdd [OPTIONS] <INPUT>
-
-ARGS:
-    <INPUT>    Input filename
-
-OPTIONS:
-        --an                      Sorts variables in an alphanumeric manner
-        --com                     Compute the complete models
-        --counter <COUNTER>       Set if the (counter-)models shall be computed and printed,
-                                  possible values are 'nai' and 'mem' for naive and memoization
-                                  repectively (only works in hybrid and naive mode)
-        --export <EXPORT>         Export the adf-bdd state after parsing and BDD instantiation to
-                                  the given filename
-        --grd                     Compute the grounded model
-    -h, --help                    Print help information
-        --import                  Import an adf- bdd state instead of an adf
-        --lib <IMPLEMENTATION>    choose the bdd implementation of either 'biodivine', 'naive', or
-                                  hybrid [default: hybrid]
-        --lx                      Sorts variables in an lexicographic manner
-    -q                            Sets log verbosity to only errors
-        --rust_log <RUST_LOG>     Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and
-                                  -q are not use [env: RUST_LOG=debug]
-        --stm                     Compute the stable models
-        --stmpre                  Compute the stable models with a pre-filter (only hybrid lib-mode)
-        --stmrew                  Compute the stable models with a single-formula rewriting (only
-                                  hybrid lib-mode)
-        --stmrew2                 Compute the stable models with a single-formula rewriting on
-                                  internal representation(only hybrid lib-mode)
-    -v                            Sets log verbosity (multiple times means more verbose)
-    -V, --version                 Print version information
-```
-
-Note that import and export only works if the naive library is chosen
-
-Right now there is no additional information to the computed models, so if you use `--com --grd --stm` as the command line arguments the borders between the results are not obviously communicated.
-They can be easily identified though:
-- The computation is always in the same order
-  - grd
-  - com
-  - stm
-- We know that there is always exactly one grounded model
-- We know that there always exist at least one complete model (i.e. the grounded one)
-- We know that there does not need to exist a stable model
-- We know that every stable model is a complete model too
-
-
 ## Input-file format:
 Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
 The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
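To make the input format above concrete, here is a minimal sketch of an instance file and a solver call. The statement names, the `neg` operator, the constant `c(v)`, and the renamed `adf-bdd` binary are illustrative assumptions drawn from the format description and usage text in this diff, not part of the changeset itself:

```bash
# Hypothetical three-statement ADF: a is unconditionally accepted,
# b's acceptance condition is just "a", and c attacks itself.
cat > example.adf <<'EOF'
s(a).
s(b).
s(c).
ac(a, c(v)).
ac(b, a).
ac(c, neg(c)).
EOF

# Compute grounded, complete, and stable models; per the note above,
# the results are printed in the fixed order grd, com, stm.
adf-bdd example.adf --grd --com --stm
```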
@@ -70,41 +41,22 @@ The binary predicate ac relates each statement to one propositional formula in p
 
 # Features
 
-`adhoccounting` will cache the modelcount on-the-fly during the construction of the BDD
+- `adhoccounting` will cache the modelcount on-the-fly during the construction of the BDD
+- `adhoccountmodels` allows in addition to compute the models ad-hoc too. Note that the memoization approach for modelcounting does not work correctly if `adhoccounting` is set and `adhoccountmodels` is not.
 
 # Development notes
 Additional information for contribution, testing, and development in general can be found here.
 ## Contributing to the project
 You want to help and contribute to the project? That is great. Please see the [contributing guidelines](https://github.com/ellmau/adf-obdd/blob/main/.github/CONTRIBUTING.md) first.
 
-## Testing with the `res` folder:
-To run all the tests placed in the submodule you need to run
-```bash
-$> git submodule init
-```
-at the first time.
-Afterwards you need to update the content of the submodule to be on the currently used revision by
-```bash
-$> git submodule update
-```
-
-The tests can be started by using the test-framework of cargo, i.e.
-```bash
-$> cargo test
-```
-Note that some of the instances are quite big and it might take some time to finish all the tests.
-If you do not initialise the submodule, tests will "only" run on the other unit-tests and (possibly forthcoming) other integration tests.
-Due to the way of the generated test-modules you need to call
-```bash
-$> cargo clean
-```
-if you change some of your test-cases.
-
-To remove the tests just type
-```bash
-$> git submodule deinit res/adf-instances
-```
-or
-```bash
-$> git submodule deinit --all
-```
+# Acknowledgements
+This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in projects number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
+the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
+[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
+and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
+
+# Affiliation
+This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
+
+# Disclaimer
+Hosting content here does not establish any formal or legal relation to TU Dresden.
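The `adhoccounting`/`adhoccountmodels` interplay described in the Features section above is fixed at compile time. A sketch of selecting a consistent feature set when building the binary follows; the feature names come from the `[features]` table in the Cargo.toml diff below, while the exact invocation is an assumption rather than part of the diff:

```bash
# Opt in to ad-hoc model counting together with ad-hoc model
# enumeration, since memoized counting is only correct with both.
cargo build --release --no-default-features \
    --features "adhoccounting,adhoccountmodels,variablelist,adf_bdd/default"
```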
@@ -1,34 +1,41 @@
 [package]
-name = "adf_bdd-solver"
+name = "adf-bdd-bin"
-version = "0.2.1"
+version = "0.3.0-dev"
 authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
 edition = "2021"
-license = "GPL-3.0-only"
-exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml"]
+homepage = "https://ellmau.github.io/adf-obdd"
+repository = "https://github.com/ellmau/adf-obdd"
+license = "MIT"
+exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml", "tarpaulin-report.*", "*~"]
 description = "Solver for ADFs grounded, complete, and stable semantics by utilising OBDDs - ordered binary decision diagrams"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [[bin]]
-name = "adf_bdd"
+name = "adf-bdd"
 path = "src/main.rs"
 
 [dependencies]
-adf_bdd = { path = "../lib", default-features = false }
+adf_bdd = { version="0.3.1", path="../lib", default-features = false }
-clap = {version = "3.1.5", features = [ "derive", "cargo", "env" ]}
+clap = {version = "4.3.0", features = [ "derive", "cargo", "env" ]}
 log = { version = "0.4", features = [ "max_level_trace", "release_max_level_info" ] }
 serde = { version = "1.0", features = ["derive","rc"] }
 serde_json = "1.0"
-env_logger = "0.9"
+env_logger = "0.10"
+strum = { version = "0.24" }
+crossbeam-channel = "0.5"
 
 [dev-dependencies]
 assert_cmd = "2.0"
-predicates = "2.1"
+predicates = "3.0"
 assert_fs = "1.0"
 
 [features]
-default = ["adhoccounting", "variablelist", "adf_bdd/default" ]
+default = ["adhoccounting", "variablelist", "adf_bdd/default", "frontend"]
 adhoccounting = ["adf_bdd/adhoccounting"] # count models ad-hoc - disable if counting is not needed
 importexport = ["adf_bdd/importexport"]
 variablelist = [ "HashSet", "adf_bdd/variablelist" ]
 HashSet = ["adf_bdd/HashSet"]
+adhoccountmodels = ["adf_bdd/adhoccountmodels"]
+benchmark = ["adf_bdd/benchmark"]
+frontend = ["adf_bdd/frontend"]
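Given the package rename from `adf_bdd-solver` to `adf-bdd-bin` with binary target `adf-bdd` in the manifest above, installing from crates.io should look roughly like this, assuming the crate is published under that name as the README badge suggests:

```bash
# Install the solver binary from crates.io and check the CLI.
cargo install adf-bdd-bin
adf-bdd --help
```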
695
bin/LICENSE
@@ -1,674 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Stefan Ellmauthaler
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
@@ -1,6 +1,15 @@
-[Coveralls](https://coveralls.io/github/ellmau/adf-obdd)
-# Solver for ADFs grounded semantics by utilising OBDDs - ordered binary decision diagrams
+[Crate](https://crates.io/crates/adf-bdd-bin)
+[Coveralls](https://coveralls.io/github/ellmau/adf-obdd)
+[Releases](https://github.com/ellmau/adf-obdd/releases)
+[Discussions](https://github.com/ellmau/adf-obdd/discussions)
+
+# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF-BDD)
+This is the readme for the executable solver.
+
 ## Abstract Dialectical Frameworks
 An abstract dialectical framework (ADF) consists of abstract statements. Each statement has a unique label and might be related to other statements (s) in the ADF. This relation is defined by a so-called acceptance condition (ac), which intuitively is a propositional formula whose variable symbols are the labels of the statements. An interpretation is a three-valued function which maps each statement to a truth value (true, false, undecided). We call such an interpretation a model if each acceptance condition agrees with the interpretation.
@@ -10,33 +19,46 @@ An ordered binary decision diagram is a normalised representation of binary functions
 ## Usage
 ```
 USAGE:
-    adf_bdd [FLAGS] [OPTIONS] <input>
+    adf-bdd [OPTIONS] <INPUT>
 
-FLAGS:
-        --com        Compute the complete models
-        --grd        Compute the grounded model
-    -h, --help       Prints help information
-        --import     Import an adf-bdd state instead of an adf
-    -q               Sets log verbosity to only errors
-        --an         Sorts variables in an alphanumeric manner
-        --lx         Sorts variables in a lexicographic manner
-        --stm        Compute the stable models
-        --stmpre     Compute the stable models with a pre-filter (only hybrid lib-mode)
-        --stmrew     Compute the stable models with a single-formula rewriting (only hybrid lib-mode)
-        --stmrew2    Compute the stable models with a single-formula rewriting on internal representation (only hybrid lib-mode)
-    -V, --version    Prints version information
-    -v               Sets log verbosity (multiple times means more verbose)
-
-OPTIONS:
-        --export <export>         Export the adf-bdd state after parsing and BDD instantiation to the given filename
-        --lib <implementation>    Choose the bdd implementation of either 'biodivine', 'naive', or 'hybrid' [default: biodivine]
-        --rust_log <rust-log>     Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not used [env: RUST_LOG=debug]
-
 ARGS:
-    <input>    Input filename
+    <INPUT>    Input filename
+
+OPTIONS:
+        --an                      Sorts variables in an alphanumeric manner
+        --com                     Compute the complete models
+        --counter <COUNTER>       Set if the (counter-)models shall be computed and printed;
+                                  possible values are 'nai' and 'mem' for naive and memoization
+                                  respectively (only works in hybrid and naive mode)
+        --export <EXPORT>         Export the adf-bdd state after parsing and BDD instantiation to
+                                  the given filename
+        --grd                     Compute the grounded model
+    -h, --help                    Print help information
+        --heu <HEU>               Choose which heuristics shall be used by the nogood-learning
+                                  approach [possible values: Simple, MinModMinPathsMaxVarImp,
+                                  MinModMaxVarImpMinPaths]
+        --import                  Import an adf-bdd state instead of an adf
+        --lib <IMPLEMENTATION>    Choose the bdd implementation of either 'biodivine', 'naive', or
+                                  'hybrid' [default: hybrid]
+        --lx                      Sorts variables in a lexicographic manner
+    -q                            Sets log verbosity to only errors
+        --rust_log <RUST_LOG>     Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v
+                                  and -q are not used [env: RUST_LOG=debug]
+        --stm                     Compute the stable models
+        --stmca                   Compute the stable models with the help of modelcounting using
+                                  heuristics a
+        --stmcb                   Compute the stable models with the help of modelcounting using
+                                  heuristics b
+        --stmng                   Compute the stable models with the nogood-learning based approach
+        --stmpre                  Compute the stable models with a pre-filter (only hybrid lib-mode)
+        --stmrew                  Compute the stable models with a single-formula rewriting (only
+                                  hybrid lib-mode)
+        --stmrew2                 Compute the stable models with a single-formula rewriting on
+                                  internal representation (only hybrid lib-mode)
+        --twoval                  Compute the two valued models with the nogood-learning based
+                                  approach
+    -v                            Sets log verbosity (multiple times means more verbose)
+    -V, --version                 Print version information
 ```
 
 Note that import and export only work if the naive library is chosen.
@@ -65,6 +87,16 @@ The binary predicate ac relates each statement to one propositional formula in prefix notation
 - c(f): constant symbol "falsum" - inconsistency/bot
 
 # Development notes
+To build the binary, you need to run
+```bash
+$> cargo build --workspace --release
+```
+
+To build the binary with debug-symbols, run
+```bash
+$> cargo build --workspace
+```
+
 To run all the tests placed in the submodule you need to run
 ```bash
 $> git submodule init
@@ -95,3 +127,15 @@ or
 ```bash
 $> git submodule deinit --all
 ```
+
+# Acknowledgements
+This work is partly supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in project number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
+by the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
+[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
+and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).
+
+# Affiliation
+This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).
+
+# Disclaimer
+Hosting content here does not establish any formal or legal relation to TU Dresden.
117
bin/src/main.rs
@@ -17,7 +17,7 @@ In addition, some further features, like counter-model counting, are not supported by biodivine.
 # Usage
 ```plain
 USAGE:
-    adf_bdd [OPTIONS] <INPUT>
+    adf-bdd [OPTIONS] <INPUT>
 
 ARGS:
     <INPUT>    Input filename
@@ -32,20 +32,29 @@ OPTIONS:
                                   the given filename
         --grd                     Compute the grounded model
     -h, --help                    Print help information
+        --heu <HEU>               Choose which heuristics shall be used by the nogood-learning
+                                  approach [possible values: Simple, MinModMinPathsMaxVarImp,
+                                  MinModMaxVarImpMinPaths]
         --import                  Import an adf-bdd state instead of an adf
-        --lib <IMPLEMENTATION>    choose the bdd implementation of either 'biodivine', 'naive', or
+        --lib <IMPLEMENTATION>    Choose the bdd implementation of either 'biodivine', 'naive', or
                                   hybrid [default: hybrid]
         --lx                      Sorts variables in a lexicographic manner
     -q                            Sets log verbosity to only errors
         --rust_log <RUST_LOG>     Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v
                                   and -q are not used [env: RUST_LOG=debug]
         --stm                     Compute the stable models
-        --stmc                    Compute the stable models with the help of modelcounting
+        --stmca                   Compute the stable models with the help of modelcounting using
+                                  heuristics a
+        --stmcb                   Compute the stable models with the help of modelcounting using
+                                  heuristics b
+        --stmng                   Compute the stable models with the nogood-learning based approach
         --stmpre                  Compute the stable models with a pre-filter (only hybrid lib-mode)
         --stmrew                  Compute the stable models with a single-formula rewriting (only
                                   hybrid lib-mode)
         --stmrew2                 Compute the stable models with a single-formula rewriting on
                                   internal representation (only hybrid lib-mode)
+        --twoval                  Compute the two valued models with the nogood-learning based
+                                  approach
     -v                            Sets log verbosity (multiple times means more verbose)
     -V, --version                 Print version information
 ```
@@ -54,7 +63,6 @@ OPTIONS:
 #![deny(
     missing_debug_implementations,
     missing_copy_implementations,
-    missing_copy_implementations,
     trivial_casts,
     trivial_numeric_casts,
     unsafe_code
@@ -74,60 +82,74 @@ use adf_bdd::adfbiodivine::Adf as BdAdf;
 
 use adf_bdd::parser::AdfParser;
 use clap::Parser;
+use crossbeam_channel::unbounded;
+use strum::VariantNames;
 
 #[derive(Parser, Debug)]
-#[clap(author, version, about)]
+#[command(author, version, about)]
 struct App {
     /// Input filename
-    #[clap(parse(from_os_str))]
+    #[arg(value_parser)]
     input: PathBuf,
     /// Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v and -q are not used
-    #[clap(long = "rust_log", env)]
+    #[arg(long = "rust_log", env)]
     rust_log: Option<String>,
-    /// choose the bdd implementation of either 'biodivine', 'naive', or hybrid
-    #[clap(long = "lib", default_value = "hybrid")]
+    /// Choose the bdd implementation of either 'biodivine', 'naive', or hybrid
+    #[arg(long = "lib", default_value = "hybrid")]
     implementation: String,
     /// Sets log verbosity (multiple times means more verbose)
-    #[clap(short, parse(from_occurrences), group = "verbosity")]
+    #[arg(short, action = clap::builder::ArgAction::Count, group = "verbosity")]
     verbose: u8,
     /// Sets log verbosity to only errors
-    #[clap(short, group = "verbosity")]
+    #[arg(short, group = "verbosity")]
     quiet: bool,
     /// Sorts variables in a lexicographic manner
-    #[clap(long = "lx", group = "sorting")]
+    #[arg(long = "lx", group = "sorting")]
     sort_lex: bool,
     /// Sorts variables in an alphanumeric manner
-    #[clap(long = "an", group = "sorting")]
+    #[arg(long = "an", group = "sorting")]
     sort_alphan: bool,
     /// Compute the grounded model
-    #[clap(long = "grd")]
+    #[arg(long = "grd")]
     grounded: bool,
     /// Compute the stable models
-    #[clap(long = "stm")]
+    #[arg(long = "stm")]
     stable: bool,
-    /// Compute the stable models with the help of modelcounting
-    #[clap(long = "stmc")]
-    stable_counting: bool,
+    /// Compute the stable models with the help of modelcounting using heuristics a
+    #[arg(long = "stmca")]
+    stable_counting_a: bool,
+    /// Compute the stable models with the help of modelcounting using heuristics b
+    #[arg(long = "stmcb")]
+    stable_counting_b: bool,
     /// Compute the stable models with a pre-filter (only hybrid lib-mode)
-    #[clap(long = "stmpre")]
+    #[arg(long = "stmpre")]
     stable_pre: bool,
     /// Compute the stable models with a single-formula rewriting (only hybrid lib-mode)
-    #[clap(long = "stmrew")]
+    #[arg(long = "stmrew")]
     stable_rew: bool,
     /// Compute the stable models with a single-formula rewriting on internal representation (only hybrid lib-mode)
-    #[clap(long = "stmrew2")]
+    #[arg(long = "stmrew2")]
     stable_rew2: bool,
+    /// Compute the stable models with the nogood-learning based approach
+    #[arg(long = "stmng")]
+    stable_ng: bool,
+    /// Choose which heuristics shall be used by the nogood-learning approach
+    #[arg(long, value_parser = clap::builder::PossibleValuesParser::new(adf_bdd::adf::heuristics::Heuristic::VARIANTS.iter().filter(|&v| v != &"Custom").collect::<Vec<_>>()))]
+    heu: Option<adf_bdd::adf::heuristics::Heuristic<'static>>,
+    /// Compute the two valued models with the nogood-learning based approach
+    #[arg(long = "twoval")]
+    two_val: bool,
     /// Compute the complete models
-    #[clap(long = "com")]
+    #[arg(long = "com")]
     complete: bool,
     /// Import an adf-bdd state instead of an adf
-    #[clap(long)]
+    #[arg(long)]
     import: bool,
     /// Export the adf-bdd state after parsing and BDD instantiation to the given filename
-    #[clap(long)]
+    #[arg(long)]
    export: Option<PathBuf>,
     /// Set if the (counter-)models shall be computed and printed; possible values are 'nai' and 'mem' for naive and memoization respectively (only works in hybrid and naive mode)
-    #[clap(long)]
+    #[arg(long)]
     counter: Option<String>,
 }
 
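The attribute changes in the hunk above follow the clap v3 to v4 derive migration: the struct-level `#[clap(...)]` becomes `#[command(...)]`, field-level `#[clap(...)]` becomes `#[arg(...)]`, and `parse(from_occurrences)` is replaced by an `ArgAction`. A minimal, hypothetical stand-alone sketch of the target style, assuming `clap = { version = "4", features = ["derive"] }` (`MiniApp` and its fields are illustrative, not part of the commit; the `--heu` analogue uses a literal value list instead of strum's `VARIANTS`):

```rust
use clap::Parser;

#[derive(Parser, Debug)]
#[command(author, version, about)]
struct MiniApp {
    /// Input filename (a positional argument, like <INPUT> above)
    #[arg(value_parser)]
    input: std::path::PathBuf,
    /// Repeatable -v; clap v3's `parse(from_occurrences)` becomes an ArgAction
    #[arg(short, action = clap::ArgAction::Count, group = "verbosity")]
    verbose: u8,
    /// -q conflicts with -v because both share the implicit "verbosity" group
    #[arg(short, group = "verbosity")]
    quiet: bool,
    /// Restricting the accepted values, analogous to --heu above
    #[arg(long, value_parser = clap::builder::PossibleValuesParser::new(["Simple", "MinModMinPathsMaxVarImp"]))]
    heu: Option<String>,
}

fn main() {
    let app = MiniApp::parse();
    println!("{app:?}");
}
```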
@@ -158,7 +180,7 @@ impl App {
         let input = std::fs::read_to_string(self.input.clone()).expect("Error Reading File");
         match self.implementation.as_str() {
             "hybrid" => {
-                let parser = adf_bdd::parser::AdfParser::default();
+                let parser = AdfParser::default();
                 match parser.parse()(&input) {
                     Ok(_) => log::info!("[Done] parsing"),
                     Err(e) => {
@@ -182,14 +204,14 @@ impl App {
                     Some("nai") => {
                         let naive_adf = adf.hybrid_step_opt(false);
                         for ac_counts in naive_adf.formulacounts(false) {
-                            print!("{:?} ", ac_counts);
+                            print!("{ac_counts:?} ");
                         }
                         println!();
                     }
                     Some("mem") => {
                         let naive_adf = adf.hybrid_step_opt(false);
                         for ac_counts in naive_adf.formulacounts(true) {
-                            print!("{:?}", ac_counts);
+                            print!("{ac_counts:?}");
                         }
                         println!();
                     }
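The `print!` rewrites in this hunk switch from positional arguments to inline format-argument capture (`{ac_counts:?}` instead of `"{:?}", ac_counts`), a feature available since Rust 1.58. A tiny stand-alone illustration (the tuple value is made up):

```rust
fn main() {
    let ac_counts = (3_u64, 5_u64);
    print!("{:?} ", ac_counts); // positional argument, pre-1.58 style
    print!("{ac_counts:?} ");   // captured identifier, as in the new code
    println!();
}
```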
@@ -212,14 +234,28 @@ impl App {
                     }
                 }
 
+                if self.two_val {
+                    let (sender, receiver) = unbounded();
+                    naive_adf.two_val_nogood_channel(self.heu.unwrap_or_default(), sender);
+                    for model in receiver.into_iter() {
+                        print!("{}", printer.print_interpretation(&model));
+                    }
+                }
+
                 if self.stable {
                     for model in naive_adf.stable() {
                         print!("{}", printer.print_interpretation(&model));
                     }
                 }
 
-                if self.stable_counting {
-                    for model in naive_adf.stable_count_optimisation() {
+                if self.stable_counting_a {
+                    for model in naive_adf.stable_count_optimisation_heu_a() {
+                        print!("{}", printer.print_interpretation(&model));
+                    }
+                }
+
+                if self.stable_counting_b {
+                    for model in naive_adf.stable_count_optimisation_heu_b() {
                         print!("{}", printer.print_interpretation(&model));
                     }
                 }
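The `for model in receiver.into_iter()` loop added above terminates because `two_val_nogood_channel` takes ownership of the `sender` and drops it once the computation finishes, which disconnects the channel. A minimal sketch of that producer/consumer pattern, assuming the crossbeam-channel crate (`Model` here is a stand-in type, not the crate's real one):

```rust
use crossbeam_channel::unbounded;

type Model = Vec<bool>; // stand-in for the solver's interpretation type

fn main() {
    let (sender, receiver) = unbounded::<Model>();
    // Producer thread: sends models as they are found and drops the
    // sender on exit, which closes the channel.
    let worker = std::thread::spawn(move || {
        for model in [vec![true, false], vec![false, true]] {
            sender.send(model).expect("receiver is still alive");
        }
    });
    // The blocking iterator yields results until every sender is dropped.
    for model in receiver.into_iter() {
        println!("{model:?}");
    }
    worker.join().unwrap();
}
```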
@@ -235,12 +271,18 @@ impl App {
                        print!("{}", printer.print_interpretation(&model));
                    }
                }
+
+                if self.stable_ng {
+                    for model in naive_adf.stable_nogood(self.heu.unwrap_or_default()) {
+                        print!("{}", printer.print_interpretation(&model));
+                    }
+                }
             }
             "biodivine" => {
                 if self.counter.is_some() {
                     log::error!("Modelcounting not supported in biodivine mode");
                 }
-                let parser = adf_bdd::parser::AdfParser::default();
+                let parser = AdfParser::default();
                 match parser.parse()(&input) {
                     Ok(_) => log::info!("[Done] parsing"),
                     Err(e) => {
@@ -326,7 +368,7 @@ impl App {
                         export.to_string_lossy()
                     );
                 } else {
-                    let export_file = match File::create(&export) {
+                    let export_file = match File::create(export) {
                         Err(reason) => {
                             panic!("couldn't create {}: {}", export.to_string_lossy(), reason)
                         }
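The `File::create(&export)` to `File::create(export)` change above most likely drops a needless borrow: `File::create` is generic over `AsRef<Path>`, so the extra `&` adds nothing and trips clippy's `needless_borrow` lint. A hypothetical stand-alone illustration (the path is made up):

```rust
use std::fs::File;
use std::path::PathBuf;

fn main() -> std::io::Result<()> {
    let export: PathBuf = std::env::temp_dir().join("adf-bdd-state.json");
    let _f = File::create(&export)?; // compiles, but the borrow is needless
    let _f = File::create(export)?;  // idiomatic on the value's last use
    Ok(())
}
```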
@@ -341,13 +383,13 @@ impl App {
                 match self.counter.as_deref() {
                     Some("nai") => {
                         for ac_counts in adf.formulacounts(false) {
-                            print!("{:?} ", ac_counts);
+                            print!("{ac_counts:?} ");
                         }
                         println!();
                     }
                     Some("mem") => {
                         for ac_counts in adf.formulacounts(true) {
-                            print!("{:?}", ac_counts);
+                            print!("{ac_counts:?}");
                         }
                         println!();
                     }
@@ -371,6 +413,13 @@ impl App {
                     print!("{}", printer.print_interpretation(&model));
                 }
             }
+
+            if self.stable_ng {
+                let printer = adf.print_dictionary();
+                for model in adf.stable_nogood(self.heu.unwrap_or_default()) {
+                    print!("{}", printer.print_interpretation(&model));
+                }
+            }
         }
     }
 }
@@ -5,29 +5,29 @@ use std::process::Command; // Run programs
 
 #[test]
 fn arguments() -> Result<(), Box<dyn std::error::Error>> {
-    let mut cmd = Command::cargo_bin("adf_bdd")?;
+    let mut cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg("-vvv").arg("--lx").arg("file.txt");
     cmd.assert()
         .failure()
         .stderr(predicate::str::contains("No such file or directory"));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg("-v").arg("--lx").arg("--an").arg("file.txt");
     cmd.assert().failure().stderr(predicate::str::contains(
-        "The argument '--lx' cannot be used with '--an'",
+        "argument '--lx' cannot be used with '--an'",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg("-h");
-    cmd.assert().success().stdout(predicate::str::contains(
-        "stefan.ellmauthaler@tu-dresden.de",
-    ));
+    cmd.assert()
+        .success()
+        .stdout(predicate::str::contains("adf-bdd [OPTIONS] <INPUT>"));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg("--version");
     cmd.assert()
         .success()
-        .stdout(predicate::str::contains("adf_bdd-solver "));
+        .stdout(predicate::str::contains("adf-bdd-bin "));
     Ok(())
 }
 
@@ -38,14 +38,14 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
     let wrong_file = assert_fs::NamedTempFile::new("wrong_format.adf")?;
     wrong_file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1)))).")?;
 
-    let mut cmd = Command::cargo_bin("adf_bdd")?;
+    let mut cmd = Command::cargo_bin("adf-bdd")?;
 
     cmd.arg(wrong_file.path());
     cmd.assert()
         .failure()
         .stderr(predicate::str::contains("code: Eof"));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("-vv")
         .arg("--grd")
@@ -55,7 +55,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("-q")
         .arg("--grd")
@@ -65,7 +65,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--lx")
         .arg("-v")
@@ -76,7 +76,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(10) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -87,7 +87,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.env_clear();
     cmd.arg(file.path())
         .arg("--an")
@@ -98,7 +98,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -110,7 +110,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -124,7 +124,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
 
     let tempdir = assert_fs::TempDir::new()?;
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -136,7 +136,7 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -148,18 +148,21 @@ fn runs_naive() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
-    cmd.arg(tempdir.path().with_file_name("test.json"))
-        .arg("--an")
-        .arg("--grd")
-        .arg("--import")
-        .arg("--lib")
-        .arg("naive");
-    cmd.assert().success().stdout(predicate::str::contains(
-        "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
-    ));
+    #[cfg(feature = "importexport")]
+    {
+        cmd = Command::cargo_bin("adf-bdd")?;
+        cmd.arg(tempdir.path().with_file_name("test.json"))
+            .arg("--an")
+            .arg("--grd")
+            .arg("--import")
+            .arg("--lib")
+            .arg("naive");
+        cmd.assert().success().stdout(predicate::str::contains(
+            "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
+        ));
+    }
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--com")
@@ -180,45 +183,45 @@ fn runs_biodivine() -> Result<(), Box<dyn std::error::Error>> {
     let wrong_file = assert_fs::NamedTempFile::new("wrong_format.adf")?;
     wrong_file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1)))).")?;
 
-    let mut cmd = Command::cargo_bin("adf_bdd")?;
+    let mut cmd = Command::cargo_bin("adf-bdd")?;
 
     cmd.arg(wrong_file.path());
     cmd.assert()
         .failure()
         .stderr(predicate::str::contains("code: Eof"));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path()).arg("-vv").arg("--grd");
     cmd.assert().success().stdout(predicate::str::contains(
         "u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path()).arg("-q").arg("--grd");
     cmd.assert().success().stdout(predicate::str::contains(
         "u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path()).arg("--lx").arg("-v").arg("--grd");
     cmd.assert().success().stdout(predicate::str::contains(
         "u(1) u(10) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path()).arg("--an").arg("--grd").arg("--stm");
     cmd.assert().success().stdout(predicate::str::contains(
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.env_clear();
     cmd.arg(file.path()).arg("--an").arg("--grd");
     cmd.assert().success().stdout(predicate::str::contains(
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -228,7 +231,7 @@ fn runs_biodivine() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -237,7 +240,7 @@ fn runs_biodivine() -> Result<(), Box<dyn std::error::Error>> {
     cmd.assert().success().stdout(predicate::str::contains(
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--com")
@@ -256,14 +259,14 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
     let wrong_file = assert_fs::NamedTempFile::new("wrong_format.adf")?;
     wrong_file.write_str("s(7).s(4).s(8).s(3).s(5).s(9).s(10).s(1).s(6).s(2).ac(7,or(or(and(7,neg(1)),neg(9)),3)).ac(4,5).ac(8,or(or(8,1),neg(7))).ac(3,or(and(or(6,7),neg(and(6,7))),neg(2))).ac(5,c(f)).ac(9,and(neg(7),2)).ac(10,or(neg(2),6)).ac(1,and(or(or(neg(2),neg(1)),8),7)).ac(6,and(and(neg(2),10),and(or(7,4),neg(and(7,4))))).ac(2,and(and(and(neg(10),3),neg(6)),or(9,1)))).")?;
 
-    let mut cmd = Command::cargo_bin("adf_bdd")?;
+    let mut cmd = Command::cargo_bin("adf-bdd")?;
 
     cmd.arg(wrong_file.path());
     cmd.assert()
         .failure()
         .stderr(predicate::str::contains("code: Eof"));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("-vv")
         .arg("--grd")
@@ -273,7 +276,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
         "u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("-q")
         .arg("--grd")
@@ -283,7 +286,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
         "u(7) F(4) u(8) u(3) F(5) u(9) u(10) u(1) u(6) u(2)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--lx")
         .arg("-v")
@@ -294,7 +297,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(10) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9)",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -305,7 +308,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.env_clear();
     cmd.arg(file.path())
         .arg("--an")
@@ -316,7 +319,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -328,7 +331,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
 
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--grd")
@@ -339,7 +342,7 @@ fn runs_biodivine_hybrid() -> Result<(), Box<dyn std::error::Error>> {
     cmd.assert().success().stdout(predicate::str::contains(
         "u(1) u(2) u(3) F(4) F(5) u(6) u(7) u(8) u(9) u(10) \n",
     ));
-    cmd = Command::cargo_bin("adf_bdd")?;
+    cmd = Command::cargo_bin("adf-bdd")?;
     cmd.arg(file.path())
         .arg("--an")
         .arg("--com")
3
docs/_config.yml
Normal file
@@ -0,0 +1,3 @@
theme: jekyll-theme-architect
show_downloads: false
markdown: kramdown
139
docs/adf-bdd.md
Normal file
@@ -0,0 +1,139 @@
[Crate](https://crates.io/crates/adf_bdd)
[Documentation](https://docs.rs/adf_bdd/latest/adf_bdd/)
[Coveralls](https://coveralls.io/github/ellmau/adf-obdd)
[Releases](https://github.com/ellmau/adf-obdd/releases)
[Discussions](https://github.com/ellmau/adf-obdd/discussions)

| [Home](index.md) | [Binary](adf-bdd.md) | [Library](adf_bdd.md) | [Web-Service](https://adf-bdd.dev) | [Repository](https://github.com/ellmau/adf-obdd) |
| --- | --- | --- | --- | --- |

# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF-BDD)
This is the readme for the executable solver.

## Usage
```
USAGE:
    adf-bdd [OPTIONS] <INPUT>

ARGS:
    <INPUT>    Input filename

OPTIONS:
        --an                      Sorts variables in an alphanumeric manner
        --com                     Compute the complete models
        --counter <COUNTER>       Set if the (counter-)models shall be computed and printed;
                                  possible values are 'nai' and 'mem' for naive and memoization
                                  respectively (only works in hybrid and naive mode)
        --export <EXPORT>         Export the adf-bdd state after parsing and BDD instantiation to
                                  the given filename
        --grd                     Compute the grounded model
    -h, --help                    Print help information
        --heu <HEU>               Choose which heuristics shall be used by the nogood-learning
                                  approach [possible values: Simple, MinModMinPathsMaxVarImp,
                                  MinModMaxVarImpMinPaths]
        --import                  Import an adf-bdd state instead of an adf
        --lib <IMPLEMENTATION>    Choose the bdd implementation of either 'biodivine', 'naive', or
                                  'hybrid' [default: hybrid]
        --lx                      Sorts variables in a lexicographic manner
    -q                            Sets log verbosity to only errors
        --rust_log <RUST_LOG>     Sets the verbosity to 'warn', 'info', 'debug' or 'trace' if -v
                                  and -q are not used [env: RUST_LOG=debug]
        --stm                     Compute the stable models
        --stmca                   Compute the stable models with the help of modelcounting using
                                  heuristics a
        --stmcb                   Compute the stable models with the help of modelcounting using
                                  heuristics b
        --stmng                   Compute the stable models with the nogood-learning based approach
        --stmpre                  Compute the stable models with a pre-filter (only hybrid lib-mode)
        --stmrew                  Compute the stable models with a single-formula rewriting (only
                                  hybrid lib-mode)
        --stmrew2                 Compute the stable models with a single-formula rewriting on
                                  internal representation (only hybrid lib-mode)
        --twoval                  Compute the two valued models with the nogood-learning based
                                  approach
    -v                            Sets log verbosity (multiple times means more verbose)
    -V, --version                 Print version information
```

Note that import and export only work if the naive library is chosen.

Right now there is no additional information attached to the computed models, so if you use --com --grd --stm the borders between the results are not obviously communicated.
They can be easily identified though:
- The computation is always in the same order:
  - grd
  - com
  - stm
- We know that there is always exactly one grounded model
- We know that there always exists at least one complete model (i.e. the grounded one)
- We know that there does not need to exist a stable model
- We know that every stable model is a complete model too

## Input-file format:
Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
- and(x,y): conjunction
- or(x,y): disjunction
- iff(x,y): if and only if
- xor(x,y): exclusive or
- neg(x): classical negation
- c(v): constant symbol "verum" - tautology/top
- c(f): constant symbol "falsum" - inconsistency/bot

An example input file in exactly this format is given on the [library page](adf_bdd.md).
# Development notes
To build the binary, you need to run
```bash
$> cargo build --workspace --release
```

To build the binary with debug-symbols, run
```bash
$> cargo build --workspace
```

To run all the tests placed in the submodule you need to run
```bash
$> git submodule init
```
the first time.
Afterwards you need to update the content of the submodule to be on the currently used revision by
```bash
$> git submodule update
```

The tests can be started by using the test-framework of cargo, i.e.
```bash
$> cargo test
```
Note that some of the instances are quite big and it might take some time to finish all the tests.
If you do not initialise the submodule, tests will "only" run on the other unit-tests and (possibly forthcoming) other integration tests.
Due to the way the test-modules are generated, you need to call
```bash
$> cargo clean
```
if you change some of your test-cases.

To remove the tests just type
```bash
$> git submodule deinit res/adf-instances
```
or
```bash
$> git submodule deinit --all
```

# Acknowledgements
This work is partly supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in project number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
by the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).

# Affiliation
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).

# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden.

167
docs/adf_bdd.md
Normal file
@@ -0,0 +1,167 @@
[Crate](https://crates.io/crates/adf_bdd)
[Documentation](https://docs.rs/adf_bdd/latest/adf_bdd/)
[Coveralls](https://coveralls.io/github/ellmau/adf-obdd)
[Releases](https://github.com/ellmau/adf-obdd/releases)
[Discussions](https://github.com/ellmau/adf-obdd/discussions)

| [Home](index.md) | [Binary](adf-bdd.md) | [Library](adf_bdd.md) | [Web-Service](https://adf-bdd.dev) | [Repository](https://github.com/ellmau/adf-obdd) |
| --- | --- | --- | --- | --- |

# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF_BDD)
This library contains an efficient representation of Abstract Dialectical Frameworks (ADF) by utilising an implementation of Ordered Binary Decision Diagrams (OBDD).

## Noteworthy relations between ADF semantics

The following relations between the computed models hold:

* The computation is always in the same order:
  * grd
  * com
  * stm
* We know that there is always exactly one grounded model
* We know that there always exists at least one complete model (i.e. the grounded one)
* We know that there does not need to exist a stable model
* We know that every stable model is a complete model too

## Ordered Binary Decision Diagram

An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheaply.

Note that one advantage of this implementation is that only one OBDD is used for all acceptance conditions. This can be done because all of them have the identical signature (i.e. the set of all statements plus top and bottom concepts). Due to this uniform representation, reductions on subformulae which are shared by two or more statements only need to be computed once and are already cached in the data structure for further applications.

The algorithm used to create a BDD from a given formula does not perform well on bigger formulae; therefore it is possible to use a state-of-the-art library to instantiate the BDD (https://github.com/sybila/biodivine-lib-bdd). It is possible to either stay with the biodivine library or switch back to the variant implemented by adf-bdd. The variant implemented in this library offers reuse of already computed reductions and memoisation techniques, which are not offered by biodivine. In addition, some further features, like counter-model counting, are not supported by biodivine.

Note that import and export only work if the naive library is chosen.

## Input-file format:

Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement. The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
```plain
and(x,y): conjunction
or(x,y): disjunction
iff(x,y): if and only if
xor(x,y): exclusive or
neg(x): classical negation
c(v): constant symbol "verum" - tautology/top
c(f): constant symbol "falsum" - inconsistency/bot
```

### Example input file:
```plain
s(a).
s(b).
s(c).
s(d).

ac(a,c(v)).
ac(b,or(a,b)).
ac(c,neg(b)).
ac(d,d).
```

## Usage examples

First parse a given ADF and sort the statements, if needed.

```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
// use the above example as input
let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
let parser = AdfParser::default();
match parser.parse()(&input) {
    Ok(_) => log::info!("[Done] parsing"),
    Err(e) => {
        log::error!(
            "Error during parsing:\n{} \n\n cannot continue, panic!",
            e
        );
        panic!("Parsing failed, see log for further details")
    }
}
// sort lexicographic
parser.varsort_lexi();
```
Use the naive/in-crate implementation:

```rust
// create Adf
let mut adf = Adf::from_parser(&parser);
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
    print!("{}", printer.print_interpretation(&model));
}
```
Use the biodivine implementation:
```rust
// create Adf
let adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser);
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
    print!("{}", printer.print_interpretation(&model));
}
```
Use the hybrid approach implementation:
```rust
// create biodivine Adf
let badf = adf_bdd::adfbiodivine::Adf::from_parser(&parser);
// instantiate the internally used adf after the reduction done by biodivine
let mut adf = badf.hybrid_step();
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
    print!("{}", printer.print_interpretation(&model));
}
```

Use the new `NoGood`-based algorithm and utilise the new interface with channels:
```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::datatypes::{Term, adf::VarContainer};
// create a channel
let (s, r) = crossbeam_channel::unbounded();
let variables = VarContainer::default();
let variables_worker = variables.clone();
// spawn a solver thread
let solving = std::thread::spawn(move || {
    // use the above example as input
    let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
    let parser = AdfParser::with_var_container(variables_worker);
    parser.parse()(&input).expect("parsing worked well");
    // use hybrid approach
    let mut adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser).hybrid_step();
    // compute stable with the simple heuristic
    adf.stable_nogood_channel(Heuristic::Simple, s);
});

let printer = variables.print_dictionary();
// print results as they are computed
while let Ok(result) = r.recv() {
    print!("stable model: {:?} \n", result);
    // use dictionary
    print!("stable model with variable names: {}", printer.print_interpretation(&result));
}
// waiting for the other thread to close
solving.join().unwrap();
```

# Acknowledgements
This work is partly supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in project number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
by the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).

# Affiliation
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).

# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden.
64
docs/index.md
Normal file
@ -0,0 +1,64 @@
[crates.io](https://crates.io/crates/adf_bdd) · [docs.rs](https://docs.rs/adf_bdd/latest/adf_bdd/) · [coverage (Coveralls)](https://coveralls.io/github/ellmau/adf-obdd) · [releases](https://github.com/ellmau/adf-obdd/releases) · [discussions](https://github.com/ellmau/adf-obdd/discussions)

| [Home](index.md) | [Binary](adf-bdd.md) | [Library](adf_bdd.md) | [Web-Service](https://adf-bdd.dev) | [Repository](https://github.com/ellmau/adf-obdd) |
| --- | --- | --- | --- | --- |

# Abstract Dialectical Frameworks solved by (ordered) Binary Decision Diagrams; developed in Dresden (ADF-oBDD project)

This project is currently split into three parts:

- a [binary (adf-bdd)](adf-bdd.md), which allows one to easily answer semantics questions on abstract dialectical frameworks
- a [library (adf_bdd)](adf_bdd.md), which contains all the necessary algorithms and an open API that computes the answers to the semantics questions
- a server and a frontend, available at https://adf-bdd.dev

The latest documentation of the API can be found [here](https://docs.rs/adf_bdd/latest/adf_bdd/).
The current version of the binary can be downloaded [here](https://github.com/ellmau/adf-obdd/releases).

Do not hesitate to report bugs or ask about features in the [issues section](https://github.com/ellmau/adf-obdd/issues), or to have a conversation about anything related to the project in the [discussion space](https://github.com/ellmau/adf-obdd/discussions).

## Abstract Dialectical Frameworks

An abstract dialectical framework (ADF) consists of abstract statements. Each statement (s) has a unique label and may be related to other statements in the ADF. This relation is defined by a so-called acceptance condition (ac), which is intuitively a propositional formula whose variable symbols are the labels of the statements. An interpretation is a three-valued function that maps each statement to a truth value (true, false, undecided). We call such an interpretation a model if every acceptance condition agrees with the interpretation; for example, a statement with acceptance condition neg(b) must be true exactly when the statement b is false.
## Ordered Binary Decision Diagram

An ordered binary decision diagram (OBDD) is a normalised representation of Boolean functions on which satisfiability and validity checks can be performed relatively cheaply. In particular, for a fixed variable order each Boolean function has exactly one such normalised diagram, so semantic checks largely reduce to structural comparisons.

## Input-file format

Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows (a complete example instance is given after the list):

- and(x,y): conjunction
- or(x,y): disjunction
- iff(x,y): if and only if
- xor(x,y): exclusive or
- neg(x): classical negation
- c(v): constant symbol "verum" - tautology/top
- c(f): constant symbol "falsum" - inconsistency/bot
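
For instance, the following instance (the same one used as the placeholder in the web frontend) declares four statements and one acceptance condition per statement:

```
s(a).
s(b).
s(c).
s(d).
ac(a,c(v)).
ac(b,b).
ac(c,and(a,b)).
ac(d,neg(b)).
```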

# Features

- `adhoccounting` will cache the model count on the fly during the construction of the BDD
- `adhoccountmodels` additionally allows the models themselves to be computed ad hoc. Note that the memoization approach for model counting does not work correctly if `adhoccounting` is set and `adhoccountmodels` is not. (A Cargo.toml sketch for enabling both features follows below.)
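
As a sketch only (the feature names are the ones listed above; the version requirement is deliberately left open, not taken from this page), a downstream crate would enable both features like this:

```toml
[dependencies]
# hypothetical dependency entry; pick the actual release you depend on
adf_bdd = { version = "*", features = ["adhoccounting", "adhoccountmodels"] }
```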

# Development notes

Additional information on contributing, testing, and development in general can be found here.

## Contributing to the project

You want to help and contribute to the project? That is great! Please see the [contributing guidelines](https://github.com/ellmau/adf-obdd/blob/main/.github/CONTRIBUTING.md) first.

# Acknowledgements

This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in project number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).

# Affiliation

This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).

# Disclaimer

Hosting content here does not establish any formal or legal relation to TU Dresden.
110
flake.lock
generated
@ -1,43 +1,33 @@
 {
   "nodes": {
-    "flake-compat": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1641205782,
-        "narHash": "sha256-4jY7RCWUoZ9cKD8co0/4tFARpWB+57+r1bLLvXNJliY=",
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "rev": "b7547d3eed6f32d06102ead8991ec52ab0a4f1a7",
-        "type": "github"
-      },
-      "original": {
-        "owner": "edolstra",
-        "repo": "flake-compat",
-        "type": "github"
-      }
-    },
     "flake-utils": {
+      "inputs": {
+        "flake-utils": "flake-utils_2"
+      },
       "locked": {
-        "lastModified": 1644229661,
-        "narHash": "sha256-1YdnJAsNy69bpcjuoKdOYQX0YxZBiCYZo4Twxerqv7k=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "3cecb5b042f7f209c56ffd8371b2711a290ec797",
+        "lastModified": 1738591040,
+        "narHash": "sha256-4WNeriUToshQ/L5J+dTSWC5OJIwT39SEP7V7oylndi8=",
+        "owner": "gytis-ivaskevicius",
+        "repo": "flake-utils-plus",
+        "rev": "afcb15b845e74ac5e998358709b2b5fe42a948d1",
         "type": "github"
       },
       "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
+        "owner": "gytis-ivaskevicius",
+        "repo": "flake-utils-plus",
         "type": "github"
       }
     },
     "flake-utils_2": {
+      "inputs": {
+        "systems": "systems"
+      },
       "locked": {
-        "lastModified": 1637014545,
-        "narHash": "sha256-26IZAc5yzlD9FlDT54io1oqG/bBoyka+FJk5guaX4x4=",
+        "lastModified": 1694529238,
+        "narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
         "owner": "numtide",
         "repo": "flake-utils",
-        "rev": "bba5dcc8e0b20ab664967ad83d24d64cb64ec4f4",
+        "rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
         "type": "github"
       },
       "original": {
@ -46,74 +36,41 @@
         "type": "github"
       }
     },
-    "gitignoresrc": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1646480205,
-        "narHash": "sha256-kekOlTlu45vuK2L9nq8iVN17V3sB0WWPqTTW3a2SQG0=",
-        "owner": "hercules-ci",
-        "repo": "gitignore.nix",
-        "rev": "bff2832ec341cf30acb3a4d3e2e7f1f7b590116a",
-        "type": "github"
-      },
-      "original": {
-        "owner": "hercules-ci",
-        "repo": "gitignore.nix",
-        "type": "github"
-      }
-    },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1647282937,
-        "narHash": "sha256-K8Oo6QyFCfiEWTRpQVfzcwI3YNMKlz6Tu8rr+o3rzRQ=",
+        "lastModified": 1750969886,
+        "narHash": "sha256-zW/OFnotiz/ndPFdebpo3X0CrbVNf22n4DjN2vxlb58=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "64fc73bd74f04d3e10cb4e70e1c65b92337e76db",
+        "rev": "a676066377a2fe7457369dd37c31fd2263b662f4",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-21.11",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_2": {
-      "locked": {
-        "lastModified": 1637453606,
-        "narHash": "sha256-Gy6cwUswft9xqsjWxFYEnx/63/qzaFUwatcbV5GF/GQ=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "8afc4e543663ca0a6a4f496262cd05233737e732",
-        "type": "github"
-      },
-      "original": {
-        "owner": "NixOS",
-        "ref": "nixpkgs-unstable",
+        "ref": "nixos-25.05",
         "repo": "nixpkgs",
         "type": "github"
       }
     },
     "root": {
       "inputs": {
-        "flake-compat": "flake-compat",
         "flake-utils": "flake-utils",
-        "gitignoresrc": "gitignoresrc",
         "nixpkgs": "nixpkgs",
         "rust-overlay": "rust-overlay"
       }
     },
     "rust-overlay": {
       "inputs": {
-        "flake-utils": "flake-utils_2",
-        "nixpkgs": "nixpkgs_2"
+        "nixpkgs": [
+          "nixpkgs"
+        ]
       },
       "locked": {
-        "lastModified": 1647397753,
-        "narHash": "sha256-Q8HjnWFj+Gdx4ElvBiF99xhhZpeGdn1OZsGzyOrg7+Y=",
+        "lastModified": 1751251399,
+        "narHash": "sha256-y+viCuy/eKKpkX1K2gDvXIJI/yzvy6zA3HObapz9XZ0=",
         "owner": "oxalica",
         "repo": "rust-overlay",
-        "rev": "dcc7af39185159fb2b8356bacca0473804a5b90e",
+        "rev": "b22d5ee8c60ed1291521f2dde48784edd6bf695b",
         "type": "github"
       },
       "original": {
@ -121,6 +78,21 @@
         "repo": "rust-overlay",
         "type": "github"
       }
+    },
+    "systems": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
     }
   },
   "root": "root",
115
flake.nix
@ -1,46 +1,89 @@
-{
-  description = "basic rust flake";
+rec {
+  description = "adf-bdd, Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden";
 
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.11";
-    rust-overlay.url = "github:oxalica/rust-overlay";
-    flake-utils.url = "github:numtide/flake-utils";
-    flake-compat = {
-      url = "github:edolstra/flake-compat";
-      flake = false;
-    };
-    gitignoresrc = {
-      url = "github:hercules-ci/gitignore.nix";
-      flake = false;
-    };
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.05";
+    rust-overlay = {
+      url = "github:oxalica/rust-overlay";
+      inputs = {
+        nixpkgs.follows = "nixpkgs";
+        flake-utils.follows = "flake-utils/flake-utils";
+      };
+    };
+    flake-utils.url = "github:gytis-ivaskevicius/flake-utils-plus";
   };
 
-  outputs = { self, nixpkgs, flake-utils, flake-compat, gitignoresrc, rust-overlay, ... }@inputs:
-    {
-      #overlay = import ./nix { inherit gitignoresrc; };
-    } // (flake-utils.lib.eachDefaultSystem (system:
-      let
-        pkgs = import nixpkgs {
-          inherit system;
-          overlays = [ (import rust-overlay)];
-        };
-      in
-      rec {
-        devShell =
-          pkgs.mkShell {
-            RUST_LOG = "debug";
-            RUST_BACKTRACE = 1;
-            buildInputs = [
-              pkgs.rust-bin.nightly.latest.rustfmt
-              pkgs.rust-bin.stable.latest.default
+  outputs = inputs @ {
+    self,
+    flake-utils,
+    rust-overlay,
+    ...
+  }:
+    flake-utils.lib.mkFlake {
+      inherit self inputs;
+      channels.nixpkgs.overlaysBuilder = channels: [rust-overlay.overlays.default];
+      outputsBuilder = channels: let
+        pkgs = channels.nixpkgs;
+        toolchain = pkgs.rust-bin.stable.latest.default;
+        platform = pkgs.makeRustPlatform {
+          cargo = toolchain;
+          rustc = toolchain;
+        };
+      in rec {
+        packages = let
+          cargoMetaBin = (builtins.fromTOML (builtins.readFile ./bin/Cargo.toml)).package;
+          cargoMetaLib = (builtins.fromTOML (builtins.readFile ./lib/Cargo.toml)).package;
+          meta = {
+            inherit description;
+            homepage = "https://github.com/ellmau/adf-obdd";
+            license = [pkgs.lib.licenses.mit];
+            nativeBuildInputs = with platform; [
+              cargoBuildHook
+              cargoCheckHook
+            ];
+          };
+        in rec {
+          adf-bdd = platform.buildRustPackage {
+            pname = "adf-bdd";
+            inherit (cargoMetaBin) version;
+            inherit meta;
+
+            src = ./.;
+            cargoLock.lockFile = ./Cargo.lock;
+
+            buildAndTestSubdir = "bin";
+          };
+          adf_bdd = platform.buildRustPackage {
+            pname = "adf_bdd";
+            inherit (cargoMetaLib) version;
+            inherit meta;
+
+            src = ./.;
+            cargoLock.lockFile = ./Cargo.lock;
+
+            buildAndTestSubdir = "lib";
+          };
+        };
+        devShells.default = pkgs.mkShell {
+          RUST_LOG = "debug";
+          RUST_BACKTRACE = 1;
+          shellHook = ''
+            export PATH=''${HOME}/.cargo/bin''${PATH+:''${PATH}}
+          '';
+          buildInputs = let
+            notOn = systems:
+              pkgs.lib.optionals (!builtins.elem pkgs.system systems);
+          in
+            [
+              toolchain
               pkgs.rust-analyzer
               pkgs.cargo-audit
               pkgs.cargo-license
-              pkgs.cargo-tarpaulin
-              pkgs.cargo-kcov
-              pkgs.kcov
-            ];
-          };
-      }
-    ));
+            ]
+            ++ (notOn ["aarch64-darwin" "x86_64-darwin"] [pkgs.kcov pkgs.gnuplot pkgs.valgrind])
+            ++ (notOn ["aarch64-linux" "aarch64-darwin" "i686-linux"] [pkgs.cargo-tarpaulin]);
+        };
+      };
+    };
 }
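
With the flake above in place, the usual flakes workflow should apply (a sketch, assuming standard Nix flakes tooling; the attribute names are the ones defined in this diff):

```sh
nix develop          # enter the development shell (devShells.default)
nix build .#adf-bdd  # build the binary package
nix build .#adf_bdd  # build the library package
```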
13
frontend/.editorconfig
Normal file
@ -0,0 +1,13 @@
root = true

[*]
end_of_line = lf
insert_final_newline = true

[*.{ts,tsx}]
indent_style = space
indent_size = 2

[package.json]
indent_style = space
indent_size = 2
27
frontend/.eslintrc.js
Normal file
@ -0,0 +1,27 @@
module.exports = {
  "env": {
    "browser": true,
    "es2021": true
  },
  "extends": [
    "plugin:react/recommended",
    "airbnb",
    "airbnb-typescript",
  ],
  "parser": "@typescript-eslint/parser",
  "parserOptions": {
    "ecmaFeatures": {
      "jsx": true
    },
    "ecmaVersion": "latest",
    "sourceType": "module",
    "project": "tsconfig.json"
  },
  "plugins": [
    "react",
    "@typescript-eslint"
  ],
  "rules": {
    "react/jsx-filename-extension": [1, { "extensions": [".tsx"] }]
  }
}
5
frontend/.gitignore
vendored
Normal file
@ -0,0 +1,5 @@
node_modules
dist
.parcel-cache
yarn-error.log
13
frontend/README.md
Normal file
@ -0,0 +1,13 @@
# Frontend for Webservice

This directory contains the (standalone) frontend for <https://adf-bdd.dev>, built using React, Material UI, and TypeScript.

## Usage

For local development, run:

- `yarn install` to install the dependencies
- `yarn run check` to run type checks and the linter (eslint)
- `yarn start` to start the development server listening on `localhost:1234`

In development mode, the frontend tries to connect to the server at `localhost:8080`.
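
Presumably, `yarn run build` (the `build` script in the package.json shown below) produces the production bundle in Parcel's default `dist/` output directory, which is why `dist` appears in the `.gitignore` above.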
5
frontend/index.d.ts
vendored
Normal file
@ -0,0 +1,5 @@
declare module 'bundle-text:*' {
  const s: string
  export default s
}
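
This ambient declaration matches Parcel's `bundle-text:` import scheme (enabled through `@parcel/transformer-inline-string` in the package.json below), so that imports such as `bundle-text:../help-texts/add-info.md` in the components type-check as plain strings.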
40
frontend/package.json
Normal file
@ -0,0 +1,40 @@
{
  "name": "ADF-OBDD-Frontend",
  "version": "0.1.0",
  "source": "src/index.html",
  "browserslist": "> 0.5%, last 2 versions, not dead",
  "scripts": {
    "check": "tsc --noEmit && eslint ./src",
    "start": "parcel",
    "build": "parcel build"
  },
  "devDependencies": {
    "@parcel/transformer-inline-string": "2.9.3",
    "@types/node": "^20.4.6",
    "@types/react": "^18.2.18",
    "@types/react-dom": "^18.2.7",
    "@typescript-eslint/eslint-plugin": "^6.2.1",
    "@typescript-eslint/parser": "^6.2.1",
    "eslint": "^8.46.0",
    "eslint-config-airbnb": "^19.0.4",
    "eslint-config-airbnb-typescript": "^17.1.0",
    "eslint-plugin-import": "^2.28.0",
    "eslint-plugin-jsx-a11y": "^6.7.1",
    "eslint-plugin-react": "^7.33.1",
    "parcel": "^2.9.3",
    "process": "^0.11.10",
    "typescript": "^5.1.6"
  },
  "dependencies": {
    "@antv/g6": "^4.8.20",
    "@emotion/react": "^11.11.1",
    "@emotion/styled": "^11.11.0",
    "@fontsource/roboto": "^5.0.6",
    "@mui/icons-material": "^5.14.3",
    "@mui/material": "^5.14.3",
    "markdown-to-jsx": "^7.2.1",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-router-dom": "^6.14.2"
  }
}
8
frontend/shell.nix
Normal file
@ -0,0 +1,8 @@
{ pkgs ? import <nixpkgs> {} }:

pkgs.mkShell {
  buildInputs = [
    pkgs.yarn
  ];
}
13
frontend/src/app.tsx
Normal file
@ -0,0 +1,13 @@
import * as React from 'react';
import { createRoot } from 'react-dom/client';

import '@fontsource/roboto/300.css';
import '@fontsource/roboto/400.css';
import '@fontsource/roboto/500.css';
import '@fontsource/roboto/700.css';

import App from './components/app';

const container = document.getElementById('app');
const root = createRoot(container!);
root.render(<App />);
371
frontend/src/components/adf-details.tsx
Normal file
@ -0,0 +1,371 @@
import React, {
  useState, useContext, useEffect, useCallback, useRef,
} from 'react';
import { useParams, useNavigate } from 'react-router-dom';
import {
  Accordion,
  AccordionDetails,
  AccordionSummary,
  Alert,
  AlertColor,
  Button,
  Chip,
  Container,
  Grid,
  Paper,
  Pagination,
  Skeleton,
  Stack,
  Tabs,
  Tab,
  TextField,
  Typography,
} from '@mui/material';

import ExpandMoreIcon from '@mui/icons-material/ExpandMore';

import DetailInfoMd from 'bundle-text:../help-texts/detail-info.md';
import Markdown from './markdown';

import GraphG6, { GraphProps } from './graph-g6';
import LoadingContext from './loading-context';
import SnackbarContext from './snackbar-context';

export type Parsing = 'Naive' | 'Hybrid';

export type StrategySnakeCase = 'parse_only' | 'ground' | 'complete' | 'stable' | 'stable_counting_a' | 'stable_counting_b' | 'stable_nogood';

export type StrategyCamelCase = 'ParseOnly' | 'Ground' | 'Complete' | 'Stable' | 'StableCountingA' | 'StableCountingB' | 'StableNogood';
export const STRATEGIES_WITHOUT_PARSE: StrategyCamelCase[] = ['Ground', 'Complete', 'Stable', 'StableCountingA', 'StableCountingB', 'StableNogood'];

export interface AcAndGraph {
  ac: string[],
  graph: GraphProps,
}

export type AcsWithGraphsOpt = {
  type: 'None',
} | {
  type: 'Error',
  content: string
} | {
  type: 'Some',
  content: AcAndGraph[]
};

export type Task = {
  type: 'Parse',
} | {
  type: 'Solve',
  content: StrategyCamelCase,
};

export interface AdfProblemInfo {
  name: string,
  code: string,
  parsing_used: Parsing,
  // NOTE: the keys are really only strategies
  acs_per_strategy: { [key in StrategySnakeCase]: AcsWithGraphsOpt },
  running_tasks: Task[],
}

export function acsWithGraphOptToColor(status: AcsWithGraphsOpt, running: boolean): AlertColor {
  if (running) {
    return 'warning';
  }

  switch (status.type) {
    case 'None': return 'info';
    case 'Error': return 'error';
    case 'Some': return 'success';
    default:
      throw new Error('Unknown type union variant (cannot occur)');
  }
}

export function acsWithGraphOptToText(status: AcsWithGraphsOpt, running: boolean): string {
  if (running) {
    return 'Running';
  }

  switch (status.type) {
    case 'None': return 'Not attempted';
    case 'Error': return 'Failed';
    case 'Some': return 'Done';
    default:
      throw new Error('Unknown type union variant (cannot occur)');
  }
}

function AdfDetails() {
  const { adfName } = useParams();
  const navigate = useNavigate();

  const { setLoading } = useContext(LoadingContext);
  const { status: snackbarInfo, setStatus: setSnackbarInfo } = useContext(SnackbarContext);
  const [problem, setProblem] = useState<AdfProblemInfo>();
  const [tab, setTab] = useState<StrategySnakeCase>('parse_only');
  const [solutionIndex, setSolutionIndex] = useState<number>(0);

  const isFirstRender = useRef(true);

  const fetchProblem = useCallback(
    () => {
      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/adf/${adfName}`, {
        method: 'GET',
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        headers: {
          'Content-Type': 'application/json',
        },
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              res.json().then((resProblem) => {
                setProblem(resProblem);
              });
              break;
            default:
              navigate('/');
              break;
          }
        });
    },
    [setProblem],
  );

  const solveHandler = useCallback(
    (strategy: StrategyCamelCase) => {
      setLoading(true);

      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/adf/${adfName}/solve`, {
        method: 'PUT',
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ strategy }),
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              setSnackbarInfo({ message: 'Solving problem now...', severity: 'success', potentialUserChange: false });
              fetchProblem();
              break;
            default:
              setSnackbarInfo({ message: 'Something went wrong trying to solve the problem.', severity: 'error', potentialUserChange: false });
              break;
          }
        })
        .finally(() => setLoading(false));
    },
    [adfName],
  );

  const deleteHandler = useCallback(
    () => {
      setLoading(true);

      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/adf/${adfName}`, {
        method: 'DELETE',
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        headers: {
          'Content-Type': 'application/json',
        },
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              setSnackbarInfo({ message: 'ADF Problem deleted.', severity: 'success', potentialUserChange: false });
              navigate('/');
              break;
            default:
              break;
          }
        })
        .finally(() => setLoading(false));
    },
    [adfName],
  );

  useEffect(
    () => {
      // TODO: having the info if the user may have changed on the snackbar info
      // is a bit lazy and unclean; be better!
      if (isFirstRender.current || snackbarInfo?.potentialUserChange) {
        isFirstRender.current = false;

        fetchProblem();
      }
    },
    [snackbarInfo?.potentialUserChange],
  );

  useEffect(
    () => {
      // if there is a running task, fetch problems again after 20 seconds
      let timeout: ReturnType<typeof setTimeout>;
      if (problem && problem.running_tasks.length > 0) {
        timeout = setTimeout(() => fetchProblem(), 20000);
      }

      return () => {
        if (timeout) {
          clearTimeout(timeout);
        }
      };
    },
    [problem],
  );

  const acsOpt = problem?.acs_per_strategy[tab];
  const acsContent = acsOpt?.type === 'Some' ? acsOpt.content : undefined;
  const tabCamelCase: StrategyCamelCase = tab.replace(/^([a-z])/, (_, p1) => p1.toUpperCase()).replace(/_([a-z])/g, (_, p1) => `${p1.toUpperCase()}`) as StrategyCamelCase;

  return (
    <>
      <Typography variant="h3" component="h1" align="center" gutterBottom>
        ADF-BDD.DEV
      </Typography>
      <Container sx={{ marginTop: 2, marginBottom: 2 }}>
        <Accordion>
          <AccordionSummary expandIcon={<ExpandMoreIcon />}>
            <span style={{ fontWeight: 'bold' }}>What can I do with the ADF now?</span>
          </AccordionSummary>
          <AccordionDetails>
            <Grid container alignItems="center" spacing={2}>
              <Grid item xs={12} sm={8}>
                <Markdown>{DetailInfoMd}</Markdown>
              </Grid>
              <Grid item xs={12} sm={4}>
                <img
                  src={new URL('../help-texts/example-bdd.png', import.meta.url).toString()}
                  alt="Example BDD"
                  style={{ maxWidth: '100%', borderRadius: 4, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)' }}
                />
              </Grid>
            </Grid>
          </AccordionDetails>
        </Accordion>
      </Container>
      <Container sx={{ marginBottom: 4 }}>
        {problem ? (
          <>
            <Paper elevation={8} sx={{ padding: 2, marginBottom: 2 }}>
              <Stack direction="row" justifyContent="space-between" sx={{ marginBottom: 1 }}>
                <Button
                  variant="outlined"
                  color="info"
                  onClick={() => { navigate('/'); }}
                >
                  Back
                </Button>
                <Typography variant="h4" component="h2" align="center" gutterBottom>
                  {problem.name}
                </Typography>
                <Button
                  type="button"
                  variant="outlined"
                  color="error"
                  onClick={() => {
                    // eslint-disable-next-line no-alert
                    if (window.confirm('Are you sure that you want to delete this ADF problem?')) {
                      deleteHandler();
                    }
                  }}
                >
                  Delete
                </Button>
              </Stack>
              <TextField
                name="code"
                label="Code"
                helperText="Click here to copy!"
                multiline
                maxRows={5}
                fullWidth
                variant="filled"
                value={problem.code.trim()}
                disabled
                sx={{ cursor: 'pointer' }}
                onClick={() => { navigator.clipboard.writeText(problem.code); setSnackbarInfo({ message: 'Code copied to clipboard!', severity: 'info', potentialUserChange: false }); }}
              />
            </Paper>
            <Tabs
              value={tab}
              onChange={(_e, newTab) => { setTab(newTab); setSolutionIndex(0); }}
              variant="scrollable"
              scrollButtons="auto"
            >
              <Tab wrapped value="parse_only" label={<Chip color={acsWithGraphOptToColor(problem.acs_per_strategy.parse_only, problem.running_tasks.some((t: Task) => t.type === 'Parse'))} label={`${problem.parsing_used} Parsing`} sx={{ cursor: 'inherit' }} />} />
              {STRATEGIES_WITHOUT_PARSE.map((strategy) => {
                const spaced = strategy.replace(/([A-Za-z])([A-Z])/g, '$1 $2');
                const snakeCase = strategy.replace(/^([A-Z])/, (_, p1) => p1.toLowerCase()).replace(/([A-Z])/g, (_, p1) => `_${p1.toLowerCase()}`) as StrategySnakeCase;
                const status = problem.acs_per_strategy[snakeCase];

                const running = problem.running_tasks.some((t: Task) => t.type === 'Solve' && t.content === strategy);

                const color = acsWithGraphOptToColor(status, running);

                return <Tab key={strategy} wrapped value={snakeCase} label={<Chip color={color} label={spaced} sx={{ cursor: 'inherit' }} />} />;
              })}
            </Tabs>

            {acsContent && acsContent.length > 1 && (
              <>
                Models:
                <br />
                <Pagination variant="outlined" shape="rounded" count={acsContent.length} page={solutionIndex + 1} onChange={(_e, newIdx) => setSolutionIndex(newIdx - 1)} />
              </>
            )}
            <Paper elevation={3} square sx={{ padding: 2, marginTop: 4, marginBottom: 4 }}>
              {problem.running_tasks.some((t: Task) => (tab === 'parse_only' && t.type === 'Parse') || (t.type === 'Solve' && t.content === tabCamelCase)) ? (
                <Alert severity="warning">Working hard to solve the problem right now...</Alert>
              ) : (
                <>
                  {acsContent && acsContent.length > 0 && (
                    <GraphG6 graph={acsContent[solutionIndex].graph} />
                  )}
                  {acsContent && acsContent.length === 0 && (
                    <Alert severity="info">The problem has no models for this strategy.</Alert>
                  )}
                  {!acsContent && acsOpt?.type === 'Error' && (
                    <Alert severity="error">
                      An error occurred:
                      {acsOpt.content}
                    </Alert>
                  )}
                  {!acsContent && acsOpt?.type === 'None' && (
                    <>
                      <Alert severity="info" sx={{ marginBottom: 1 }}>This strategy was not attempted yet.</Alert>
                      <Button
                        variant="contained"
                        size="large"
                        color="warning"
                        onClick={() => {
                          solveHandler(tabCamelCase);
                        }}
                      >
                        Solve now!
                      </Button>
                    </>
                  )}
                </>
              )}
            </Paper>
          </>
        ) : (
          <>
            <Paper elevation={8} sx={{ padding: 2, marginBottom: 8 }}>
              <Skeleton variant="text" width="50%" sx={{ fontSize: '2.125rem', margin: 'auto' }} />
              <Skeleton variant="rounded" width="100%" height={200} />
            </Paper>
            <Skeleton variant="rectangular" width="100%" height={500} />
          </>
        )}
      </Container>
    </>
  );
}

export default AdfDetails;
187
frontend/src/components/adf-new-form.tsx
Normal file
@ -0,0 +1,187 @@
import React, {
  useState, useContext, useCallback, useRef,
} from 'react';

import {
  Button,
  Container,
  FormControl,
  FormControlLabel,
  FormLabel,
  Link,
  Paper,
  Radio,
  RadioGroup,
  Stack,
  Typography,
  TextField,
  ToggleButtonGroup,
  ToggleButton,
} from '@mui/material';

import LoadingContext from './loading-context';
import SnackbarContext from './snackbar-context';

import { Parsing } from './adf-details';

const PLACEHOLDER = `s(a).
s(b).
s(c).
s(d).
ac(a,c(v)).
ac(b,b).
ac(c,and(a,b)).
ac(d,neg(b)).`;

function AdfNewForm({ fetchProblems }: { fetchProblems: () => void; }) {
  const { setLoading } = useContext(LoadingContext);
  const { setStatus: setSnackbarInfo } = useContext(SnackbarContext);
  const [isFileUpload, setFileUpload] = useState(false);
  const [code, setCode] = useState(PLACEHOLDER);
  const [filename, setFilename] = useState('');
  const [parsing, setParsing] = useState<Parsing>('Naive');
  const [isAf, setIsAf] = useState(false);
  const [name, setName] = useState('');
  const fileRef = useRef<HTMLInputElement>(null);

  const addAdf = useCallback(
    () => {
      setLoading(true);

      const formData = new FormData();

      if (isFileUpload && fileRef.current) {
        const file = fileRef.current.files?.[0];
        if (file) {
          formData.append('file', file);
        }
      } else {
        formData.append('code', code);
      }

      formData.append('parsing', parsing);
      formData.append('is_af', String(isAf)); // FormData values are strings, so serialise the boolean
      formData.append('name', name);

      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/adf/add`, {
        method: 'POST',
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        body: formData,
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              setSnackbarInfo({ message: 'Successfully added ADF problem!', severity: 'success', potentialUserChange: true });
              fetchProblems();
              break;
            default:
              setSnackbarInfo({ message: 'An error occurred while adding the ADF problem.', severity: 'error', potentialUserChange: true });
              break;
          }
        })
        .finally(() => setLoading(false));
    },
    [isFileUpload, code, filename, parsing, name, fileRef.current],
  );

  return (
    <Container>
      <Paper elevation={8} sx={{ padding: 2 }}>
        <Typography variant="h4" component="h2" align="center" gutterBottom>
          Add a new Problem
        </Typography>
        <Container sx={{ marginTop: 2, marginBottom: 2 }}>
          <Stack direction="row" justifyContent="center">
            <ToggleButtonGroup
              value={isFileUpload}
              exclusive
              onChange={(_e, newValue) => { setFileUpload(newValue); setFilename(''); }}
            >
              <ToggleButton value={false}>
                Write by Hand
              </ToggleButton>
              <ToggleButton value>
                Upload File
              </ToggleButton>
            </ToggleButtonGroup>
          </Stack>
        </Container>

        <Container sx={{ marginTop: 2, marginBottom: 2 }}>
          {isFileUpload ? (
            <Stack direction="row" justifyContent="center">
              <Button component="label">
                {(!!filename && fileRef?.current?.files?.[0]) ? `File '${filename.split(/[\\/]/).pop()}' selected! (Click to change)` : 'Upload File'}
                <input hidden type="file" onChange={(event) => { setFilename(event.target.value); }} ref={fileRef} />
              </Button>
            </Stack>
          ) : (
            <TextField
              name="code"
              label="Put your code here:"
              helperText={(
                <>
                  For more info on the ADF syntax, have a look
                  {' '}
                  <Link href="https://github.com/ellmau/adf-obdd" target="_blank" rel="noopener noreferrer">here</Link>
                  . For the AF syntax, we currently only allow the ICCMA competition format, see for example
                  {' '}
                  <Link href="https://argumentationcompetition.org/2025/rules.html" target="_blank" rel="noopener noreferrer">here</Link>
                  .
                </>
              )}
              multiline
              fullWidth
              variant="filled"
              value={code}
              onChange={(event) => { setCode(event.target.value); }}
            />
          )}
        </Container>

        <Container sx={{ marginTop: 2 }}>
          <Stack direction="row" justifyContent="center" spacing={2}>
            <FormControl>
              <FormLabel id="isAf-radio-group">ADF or AF?</FormLabel>
              <RadioGroup
                row
                aria-labelledby="isAf-radio-group"
                name="isAf"
                value={isAf}
                onChange={(e) => setIsAf((e.target as HTMLInputElement).value === 'true')}
              >
                <FormControlLabel value={false} control={<Radio />} label="ADF" />
                <FormControlLabel value control={<Radio />} label="AF" />
              </RadioGroup>
              <span style={{ fontSize: '0.7em' }}>AFs are converted to ADFs internally.</span>
            </FormControl>
            <FormControl>
              <FormLabel id="parsing-radio-group">Parsing Strategy</FormLabel>
              <RadioGroup
                row
                aria-labelledby="parsing-radio-group"
                name="parsing"
                value={parsing}
                onChange={(e) => setParsing(((e.target as HTMLInputElement).value) as Parsing)}
              >
                <FormControlLabel value="Naive" control={<Radio />} label="Naive" />
                <FormControlLabel value="Hybrid" control={<Radio />} label="Hybrid" />
              </RadioGroup>
            </FormControl>
            <TextField
              name="name"
              label="Adf Problem Name (optional):"
              variant="standard"
              value={name}
              onChange={(event) => { setName(event.target.value); }}
            />
            <Button variant="outlined" onClick={() => addAdf()}>Add Adf Problem</Button>
          </Stack>
        </Container>
      </Paper>
    </Container>
  );
}

export default AdfNewForm;
189
frontend/src/components/adf-overview.tsx
Normal file
@ -0,0 +1,189 @@
import React, {
  useRef, useState, useCallback, useEffect, useContext,
} from 'react';

import {
  useNavigate,
} from 'react-router-dom';

import {
  Accordion,
  AccordionDetails,
  AccordionSummary,
  Chip,
  Container,
  Paper,
  TableContainer,
  Table,
  TableHead,
  TableRow,
  TableCell,
  TableBody,
  Typography,
} from '@mui/material';

import ExpandMoreIcon from '@mui/icons-material/ExpandMore';

import AddInfoMd from 'bundle-text:../help-texts/add-info.md';
import Markdown from './markdown';

import AdfNewForm from './adf-new-form';

import {
  AdfProblemInfo,
  StrategySnakeCase,
  STRATEGIES_WITHOUT_PARSE,
  Task,
  acsWithGraphOptToColor,
  acsWithGraphOptToText,
} from './adf-details';

import SnackbarContext from './snackbar-context';

function AdfOverview() {
  const { status: snackbarInfo } = useContext(SnackbarContext);
  const [problems, setProblems] = useState<AdfProblemInfo[]>([]);

  const navigate = useNavigate();

  const isFirstRender = useRef(true);

  const fetchProblems = useCallback(
    () => {
      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/adf/`, {
        method: 'GET',
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        headers: {
          'Content-Type': 'application/json',
        },
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              res.json().then((resProblems) => {
                setProblems(resProblems);
              });
              break;
            case 401:
              setProblems([]);
              break;
            default:
              break;
          }
        });
    },
    [setProblems],
  );

  useEffect(
    () => {
      // TODO: having the info if the user may have changed on the snackbar info
      // is a bit lazy and unclean; be better!
      if (isFirstRender.current || snackbarInfo?.potentialUserChange) {
        isFirstRender.current = false;

        fetchProblems();
      }
    },
    [snackbarInfo?.potentialUserChange],
  );

  useEffect(
    () => {
      // if there is a running task, fetch problems again after 20 seconds
      let timeout: ReturnType<typeof setTimeout>;
      if (problems.some((p) => p.running_tasks.length > 0)) {
        timeout = setTimeout(() => fetchProblems(), 20000);
      }

      return () => {
        if (timeout) {
          clearTimeout(timeout);
        }
      };
    },
    [problems],
  );

  return (
    <>
      <Typography variant="h3" component="h1" align="center" gutterBottom>
        ADF-BDD.DEV
      </Typography>
      <Container sx={{ marginTop: 2, marginBottom: 2 }}>
        <Accordion>
          <AccordionSummary expandIcon={<ExpandMoreIcon />}>
            <span style={{ fontWeight: 'bold' }}>What is this webapp doing and how should I use it?</span>
          </AccordionSummary>
          <AccordionDetails>
            <Markdown>{AddInfoMd}</Markdown>
          </AccordionDetails>
        </Accordion>
      </Container>
      {problems.length > 0
        && (
        <Container sx={{ marginBottom: 4 }}>
          <Paper elevation={8} sx={{ padding: 2 }}>
            <Typography variant="h4" component="h2" align="center" gutterBottom>
              Existing Problems
            </Typography>
            <TableContainer component={Paper}>
              <Table>
                <TableHead>
                  <TableRow>
                    <TableCell align="center">ADF Problem Name</TableCell>
                    <TableCell align="center">Parse Status</TableCell>
                    <TableCell align="center">Grounded Solution</TableCell>
                    <TableCell align="center">Complete Solution</TableCell>
                    <TableCell align="center">Stable Solution</TableCell>
                    <TableCell align="center">Stable Solution (Counting Method A)</TableCell>
                    <TableCell align="center">Stable Solution (Counting Method B)</TableCell>
                    <TableCell align="center">Stable Solution (Nogood-Based)</TableCell>
                  </TableRow>
                </TableHead>
                <TableBody>
                  {problems.map((problem) => (
                    <TableRow
                      key={problem.name}
                      onClick={() => { navigate(`/${problem.name}`); }}
                      sx={{ '&:last-child td, &:last-child th': { border: 0 }, cursor: 'pointer' }}
                    >
                      <TableCell component="th" scope="row">
                        {problem.name}
                      </TableCell>
                      {
                        (() => {
                          const status = problem.acs_per_strategy.parse_only;
                          const running = problem.running_tasks.some((t: Task) => t.type === 'Parse');

                          const color = acsWithGraphOptToColor(status, running);
                          const text = acsWithGraphOptToText(status, running);

                          return <TableCell align="center"><Chip color={color} label={`${text} (${problem.parsing_used} Parsing)`} sx={{ cursor: 'inherit' }} /></TableCell>;
                        })()
                      }
                      {
                        STRATEGIES_WITHOUT_PARSE.map((strategy) => {
                          const status = problem.acs_per_strategy[strategy.replace(/^([A-Z])/, (_, p1) => p1.toLowerCase()).replace(/([A-Z])/g, (_, p1) => `_${p1.toLowerCase()}`) as StrategySnakeCase];
                          const running = problem.running_tasks.some((t: Task) => t.type === 'Solve' && t.content === strategy);

                          const color = acsWithGraphOptToColor(status, running);
                          const text = acsWithGraphOptToText(status, running);

                          return <TableCell key={strategy} align="center"><Chip color={color} label={text} sx={{ cursor: 'inherit' }} /></TableCell>;
                        })
                      }
                    </TableRow>
                  ))}
                </TableBody>
              </Table>
            </TableContainer>
          </Paper>
        </Container>
        )}
      <AdfNewForm fetchProblems={fetchProblems} />
    </>
  );
}

export default AdfOverview;
155
frontend/src/components/app.tsx
Normal file
@ -0,0 +1,155 @@
import React, { useState, useMemo } from 'react';

import { createBrowserRouter, RouterProvider } from 'react-router-dom';

import { ThemeProvider, createTheme } from '@mui/material/styles';
import {
  Alert,
  AlertColor,
  Backdrop,
  Container,
  CircularProgress,
  CssBaseline,
  Link,
  Snackbar,
  Stack,
  useMediaQuery,
} from '@mui/material';

import LoadingContext from './loading-context';
import SnackbarContext from './snackbar-context';
import Footer from './footer';
import AdfOverview from './adf-overview';
import AdfDetails from './adf-details';

const browserRouter = createBrowserRouter([
  {
    path: '/',
    element: <AdfOverview />,
  },
  {
    path: '/:adfName',
    element: <AdfDetails />,
  },
]);

function App() {
  const prefersDarkMode = useMediaQuery('(prefers-color-scheme: dark)');

  const theme = useMemo(
    () => createTheme({
      palette: {
        mode: prefersDarkMode ? 'dark' : 'light',
      },
    }),
    [prefersDarkMode],
  );

  const [loading, setLoading] = useState(false);
  const loadingContext = useMemo(() => ({ loading, setLoading }), [loading, setLoading]);

  const [snackbarInfo, setSnackbarInfo] = useState<{
    message: string,
    severity: AlertColor,
    potentialUserChange: boolean,
  } | undefined>();
  const snackbarContext = useMemo(
    () => ({ status: snackbarInfo, setStatus: setSnackbarInfo }),
    [snackbarInfo, setSnackbarInfo],
  );

  return (
    <ThemeProvider theme={theme}>
      <LoadingContext.Provider value={loadingContext}>
        <SnackbarContext.Provider value={snackbarContext}>
          <CssBaseline />
          <main style={{ maxHeight: 'calc(100vh - 70px)', overflowY: 'auto' }}>
            <RouterProvider router={browserRouter} />

            <Container sx={{ marginTop: 4 }}>
              <Stack direction="row" justifyContent="center" flexWrap="wrap">
                <Link href="https://www.innosale.eu/" target="_blank" rel="noopener noreferrer">
                  <img
                    src={new URL('../innosale-logo.png', import.meta.url).toString()}
                    alt="InnoSale Logo"
                    height="40"
                    style={{
                      display: 'inline-block', borderRadius: 4, margin: 2, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)', padding: 8, background: '#FFFFFF',
                    }}
                  />
                </Link>
                <Link href="https://scads.ai/" target="_blank" rel="noopener noreferrer">
                  <img
                    src={new URL('../scads-logo.png', import.meta.url).toString()}
                    alt="Scads.AI Logo"
                    height="40"
                    style={{
                      display: 'inline-block', borderRadius: 4, margin: 2, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)', padding: 2, background: '#FFFFFF',
                    }}
                  />
                </Link>
                <Link href="https://secai.org/" target="_blank" rel="noopener noreferrer">
                  <img
                    src={new URL('../secai-logo.png', import.meta.url).toString()}
                    alt="Secai Logo"
                    height="40"
                    style={{
                      display: 'inline-block', borderRadius: 4, margin: 2, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)',
                    }}
                  />
                </Link>
                <Link href="https://perspicuous-computing.science" target="_blank" rel="noopener noreferrer">
                  <img
                    src={new URL('../cpec-logo.png', import.meta.url).toString()}
                    alt="CPEC Logo"
                    height="40"
                    style={{
                      display: 'inline-block', borderRadius: 4, margin: 2, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)', padding: 8, background: '#FFFFFF',
                    }}
                  />
                </Link>
                <Link href="https://iccl.inf.tu-dresden.de" target="_blank" rel="noopener noreferrer">
                  <img
                    src={new URL('../iccl-logo.png', import.meta.url).toString()}
                    alt="ICCL Logo"
                    height="40"
                    style={{
                      display: 'inline-block', borderRadius: 4, margin: 2, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)', padding: 4, background: '#FFFFFF',
                    }}
                  />
                </Link>
                <Link href="https://tu-dresden.de" target="_blank" rel="noopener noreferrer">
                  <img
                    src={new URL('../tud-logo.png', import.meta.url).toString()}
                    alt="TU Dresden Logo"
                    height="40"
                    style={{
                      display: 'inline-block', borderRadius: 4, margin: 2, boxShadow: '0 0 5px 0 rgba(0,0,0,0.4)',
                    }}
                  />
                </Link>
              </Stack>
            </Container>
          </main>

          <Footer />

          <Backdrop
            open={loading}
          >
            <CircularProgress color="inherit" />
          </Backdrop>
          <Snackbar
            open={!!snackbarInfo}
            autoHideDuration={10000}
            onClose={() => setSnackbarInfo(undefined)}
          >
            <Alert severity={snackbarInfo?.severity}>{snackbarInfo?.message}</Alert>
          </Snackbar>
        </SnackbarContext.Provider>
      </LoadingContext.Provider>
    </ThemeProvider>
  );
}

export default App;
247
frontend/src/components/footer.tsx
Normal file
@ -0,0 +1,247 @@
import React, {
  useState, useCallback, useContext, useEffect, useRef,
} from 'react';

import {
  AlertColor,
  Alert,
  AppBar,
  Box,
  Button,
  Dialog,
  DialogActions,
  DialogContent,
  DialogTitle,
  Link,
  TextField,
  Toolbar,
} from '@mui/material';

import LoadingContext from './loading-context';
import SnackbarContext from './snackbar-context';

enum UserFormType {
  Login = 'Login',
  Register = 'Register',
  Update = 'Update',
}

interface UserFormProps {
  formType: UserFormType | null;
  close: (message?: string, severity?: AlertColor) => void;
  username?: string;
}

function UserForm({ username: propUsername, formType, close }: UserFormProps) {
  const { setLoading } = useContext(LoadingContext);
  const [username, setUsername] = useState<string>(propUsername || '');
  const [password, setPassword] = useState<string>('');
  const [errorOccurred, setError] = useState<boolean>(false);

  const submitHandler = useCallback(
    (del: boolean) => {
      setLoading(true);
      setError(false);

      let method; let endpoint;
      if (del) {
        method = 'DELETE';
        endpoint = '/users/delete';
      } else {
        switch (formType) {
          case UserFormType.Login:
            method = 'POST';
            endpoint = '/users/login';
            break;
          case UserFormType.Register:
            method = 'POST';
            endpoint = '/users/register';
            break;
          case UserFormType.Update:
            method = 'PUT';
            endpoint = '/users/update';
            break;
          default:
            // NOTE: the value is not null when the dialog is open
            break;
        }
      }

      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}${endpoint}`, {
        method,
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        headers: {
          'Content-Type': 'application/json',
        },
        body: !del ? JSON.stringify({ username, password }) : undefined,
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              close(`Action '${del ? 'Delete' : formType}' successful!`, 'success');
              break;
            default:
              setError(true);
              break;
          }
        })
        .finally(() => setLoading(false));
    },
    [username, password, formType],
  );

  return (
    <form onSubmit={(e) => { e.preventDefault(); submitHandler(false); }}>
      <DialogTitle>{formType}</DialogTitle>
      <DialogContent>
        <TextField
          variant="standard"
          type="text"
          label="Username"
          value={username}
          onChange={(event) => { setUsername(event.target.value); }}
        />
        <br />
        <TextField
          variant="standard"
          type="password"
          label="Password"
          value={password}
          onChange={(event) => { setPassword(event.target.value); }}
        />
        {errorOccurred
          && <Alert severity="error">Check your inputs!</Alert>}
      </DialogContent>
      <DialogActions>
        <Button type="button" onClick={() => close()}>Cancel</Button>
        <Button type="submit" variant="contained" color="success">{formType}</Button>
        {formType === UserFormType.Update
          // TODO: add another confirm dialog here
          && (
          <Button
            type="button"
            variant="outlined"
            color="error"
            onClick={() => {
              // eslint-disable-next-line no-alert
              if (window.confirm('Are you sure that you want to delete your account?')) {
                submitHandler(true);
              }
            }}
          >
            Delete Account
          </Button>
          )}
      </DialogActions>
    </form>
  );
}

UserForm.defaultProps = { username: undefined };

function Footer() {
  const { status: snackbarInfo, setStatus: setSnackbarInfo } = useContext(SnackbarContext);
  const [username, setUsername] = useState<string>();
  const [tempUser, setTempUser] = useState<boolean>();
  const [dialogTypeOpen, setDialogTypeOpen] = useState<UserFormType | null>(null);

  const isFirstRender = useRef(true);

  const logout = useCallback(() => {
    fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/users/logout`, {
      method: 'DELETE',
      credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
      headers: {
        'Content-Type': 'application/json',
      },
    })
      .then((res) => {
        switch (res.status) {
          case 200:
            setSnackbarInfo({ message: 'Logout successful!', severity: 'success', potentialUserChange: true });
            setUsername(undefined);
            break;
          default:
            setSnackbarInfo({ message: 'An error occurred while trying to log out.', severity: 'error', potentialUserChange: false });
            break;
        }
      });
  }, [setSnackbarInfo]);

  useEffect(() => {
    // TODO: having the info if the user may have changed on the snackbar info
    // is a bit lazy and unclean; be better!
    if (isFirstRender.current || snackbarInfo?.potentialUserChange) {
      isFirstRender.current = false;

      fetch(`${process.env.NODE_ENV === 'development' ? '//localhost:8080' : ''}/users/info`, {
        method: 'GET',
        credentials: process.env.NODE_ENV === 'development' ? 'include' : 'same-origin',
        headers: {
          'Content-Type': 'application/json',
        },
      })
        .then((res) => {
          switch (res.status) {
            case 200:
              res.json().then(({ username: user, temp }) => {
                setUsername(user);
                setTempUser(temp);
              });
              break;
            default:
              setUsername(undefined);
              break;
          }
        });
    }
  }, [snackbarInfo?.potentialUserChange]);

  return (
    <>
      <AppBar position="fixed" sx={{ top: 'auto', bottom: 0 }}>
        <Toolbar sx={{ justifyContent: 'center', alignItems: 'center' }}>
          <Box sx={{ flexGrow: 1 }}>
            {username ? (
              <>
                <span>
                  Logged in as:
                  {' '}
                  {username}
                  {' '}
                  {tempUser ? '(Temporary User. Edit to set a password!)' : undefined}
                </span>
                <Button color="inherit" onClick={() => setDialogTypeOpen(UserFormType.Update)}>Edit</Button>
                {!tempUser && <Button color="inherit" onClick={() => logout()}>Logout</Button>}
              </>
            ) : (
              <>
                <Button color="inherit" onClick={() => setDialogTypeOpen(UserFormType.Login)}>Login</Button>
                <Button color="inherit" onClick={() => setDialogTypeOpen(UserFormType.Register)}>Register</Button>
              </>
            )}
          </Box>

          <Link color="inherit" href="/legal.html" target="_blank" sx={{ fontSize: '0.8rem' }}>
            Legal Information (Impressum and Data Protection Regulation)
          </Link>
        </Toolbar>
      </AppBar>
      <Dialog open={!!dialogTypeOpen} onClose={() => setDialogTypeOpen(null)}>
        <UserForm
          formType={dialogTypeOpen}
          close={(message, severity) => {
            setDialogTypeOpen(null);
            setSnackbarInfo((!!message && !!severity)
              ? { message, severity, potentialUserChange: true }
|
||||||
|
: undefined);
|
||||||
|
}}
|
||||||
|
username={dialogTypeOpen === UserFormType.Update ? username : undefined}
|
||||||
|
/>
|
||||||
|
</Dialog>
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export default Footer;
|
||||||
381
frontend/src/components/graph-g6.tsx
Normal file
@ -0,0 +1,381 @@
import React, { useEffect, useRef } from 'react';

import G6, { Graph } from '@antv/g6';

G6.registerNode('nodeWithFlag', {
  draw(cfg, group) {
    const mainWidth = Math.max(30, 5 * (cfg!.mainLabel as string).length + 10);
    const mainHeight = 30;

    const keyShape = group!.addShape('rect', {
      attrs: {
        width: mainWidth,
        height: mainHeight,
        radius: 2,
        fill: 'white',
        stroke: 'black',
        cursor: 'pointer',
      },
      name: 'rectMainLabel',
      draggable: true,
    });

    group!.addShape('text', {
      attrs: {
        x: mainWidth / 2,
        y: mainHeight / 2,
        textAlign: 'center',
        textBaseline: 'middle',
        text: cfg!.mainLabel,
        fill: '#212121',
        fontFamily: 'Roboto',
        cursor: 'pointer',
      },
      // must be assigned in G6 3.3 and later versions. it can be any value you want
      name: 'textMainLabel',
      // allow the shape to respond to the drag events
      draggable: true,
    });

    if (cfg!.subLabel) {
      const subWidth = 5 * (cfg!.subLabel as string).length + 4;
      const subHeight = 20;

      const subRectX = mainWidth - 4;
      const subRectY = -subHeight + 4;

      group!.addShape('rect', {
        attrs: {
          x: subRectX,
          y: subRectY,
          width: subWidth,
          height: subHeight,
          radius: 1,
          fill: '#4caf50',
          stroke: '#1b5e20',
          cursor: 'pointer',
        },
        name: 'rectMainLabel',
        draggable: true,
      });

      group!.addShape('text', {
        attrs: {
          x: subRectX + subWidth / 2,
          y: subRectY + subHeight / 2,
          textAlign: 'center',
          textBaseline: 'middle',
          text: cfg!.subLabel,
          fill: '#212121',
          fontFamily: 'Roboto',
          fontSize: 10,
          cursor: 'pointer',
        },
        // must be assigned in G6 3.3 and later versions. it can be any value you want
        name: 'textMainLabel',
        // allow the shape to respond to the drag events
        draggable: true,
      });
    }

    return keyShape;
  },
  getAnchorPoints() {
    return [[0.5, 0], [0, 0.5], [1, 0.5], [0.5, 1]];
  },
  // nodeStateStyles: {
  //   hover: {
  //     fill: 'lightsteelblue',
  //   },
  //   highlight: {
  //     lineWidth: 3,
  //   },
  //   lowlight: {
  //     opacity: 0.3,
  //   },
  // },
  setState(name, value, item) {
    if (!item) { return; }
    const group = item.getContainer();
    const mainShape = group.get('children')[0]; // Find the first graphics shape of the node. It is determined by the order of being added
    const subShape = group.get('children')[2];

    if (name === 'hover') {
      if (value) {
        mainShape.attr('fill', 'lightsteelblue');
      } else {
        mainShape.attr('fill', 'white');
      }
    }

    if (name === 'highlight') {
      if (value) {
        mainShape.attr('lineWidth', 3);
      } else {
        mainShape.attr('lineWidth', 1);
      }
    }

    if (name === 'lowlight') {
      if (value) {
        mainShape.attr('opacity', 0.3);
        if (subShape) {
          subShape.attr('opacity', 0.3);
        }
      } else {
        mainShape.attr('opacity', 1);
        if (subShape) {
          subShape.attr('opacity', 1);
        }
      }
    }
  },
});

export interface GraphProps {
  lo_edges: [string, string][],
  hi_edges: [string, string][],
  node_labels: { [key: string]: string },
  tree_root_labels: { [key: string]: string[] },
}

function nodesAndEdgesFromGraphProps(graphProps: GraphProps) {
  const nodes = Object.keys(graphProps.node_labels).map((id) => {
    const mainLabel = graphProps.node_labels[id];
    const subLabel = graphProps.tree_root_labels[id].length > 0 ? `Root for: ${graphProps.tree_root_labels[id].join(' ; ')}` : undefined;

    // const label = subLabel.length > 0 ? `${mainLabel}\n${subLabel}` : mainLabel;

    return {
      id: id.toString(),
      mainLabel,
      subLabel,
      // style: {
      //   height: subLabel.length > 0 ? 60 : 30,
      //   width: Math.max(30, 5 * mainLabel.length + 10, 5 * subLabel.length + 10),
      // },
    };
  });
  const edges = graphProps.lo_edges.map(([source, target]) => ({
    id: `LO_${source}_${target}`, source: source.toString(), target: target.toString(), style: { stroke: '#ed6c02', lineWidth: 2 },
  }))
    .concat(graphProps.hi_edges.map(([source, target]) => ({
      id: `HI_${source}_${target}`, source: source.toString(), target: target.toString(), style: { stroke: '#1976d2', lineWidth: 2 },
    })));

  return { nodes, edges };
}

interface Props {
  graph: GraphProps,
}

function GraphG6(props: Props) {
  const { graph: graphProps } = props;

  const ref = useRef(null);

  const graphRef = useRef<Graph>();

  useEffect(
    () => {
      if (!graphRef.current) {
        graphRef.current = new Graph({
          container: ref.current!,
          height: 800,
          fitView: true,
          modes: {
            default: ['drag-canvas', 'zoom-canvas', 'drag-node'],
          },
          layout: {
            type: 'dagre',
            rankdir: 'BT',
          },
          // defaultNode: {
          //   anchorPoints: [[0.5, 0], [0, 0.5], [1, 0.5], [0.5, 1]],
          //   type: 'rect',
          //   style: {
          //     radius: 2,
          //   },
          //   labelCfg: {
          //     style: {
          //       // fontWeight: 700,
          //       fontFamily: 'Roboto',
          //     },
          //   },
          // },
          defaultNode: { type: 'nodeWithFlag' },
          defaultEdge: {
            style: {
              endArrow: true,
            },
          },
          // nodeStateStyles: {
          //   hover: {
          //     fill: 'lightsteelblue',
          //   },
          //   highlight: {
          //     lineWidth: 3,
          //   },
          //   lowlight: {
          //     opacity: 0.3,
          //   },
          // },
          edgeStateStyles: {
            lowlight: {
              opacity: 0.3,
            },
          },
          animate: true,
          animateCfg: {
            duration: 500,
            easing: 'easePolyInOut',
          },
        });
      }

      const graph = graphRef.current;

      // Mouse enter a node
      graph.on('node:mouseenter', (e) => {
        const nodeItem = e.item!; // Get the target item
        graph.setItemState(nodeItem, 'hover', true); // Set the state 'hover' of the item to be true
      });

      // Mouse leave a node
      graph.on('node:mouseleave', (e) => {
        const nodeItem = e.item!; // Get the target item
        graph.setItemState(nodeItem, 'hover', false); // Set the state 'hover' of the item to be false
      });
    },
    [],
  );

  useEffect(
    () => {
      const graph = graphRef.current!;

      // Click a node
      graph.on('node:click', (e) => {
        const nodeItem = e.item!; // Get the clicked item

        let onlyRemoveStates = false;
        if (nodeItem.hasState('highlight')) {
          onlyRemoveStates = true;
        }

        // Clear all previously set highlight/lowlight states first
        const clickNodes = graph.findAllByState('node', 'highlight');
        clickNodes.forEach((cn) => {
          graph.setItemState(cn, 'highlight', false);
        });

        const lowlightNodes = graph.findAllByState('node', 'lowlight');
        lowlightNodes.forEach((cn) => {
          graph.setItemState(cn, 'lowlight', false);
        });
        const lowlightEdges = graph.findAllByState('edge', 'lowlight');
        lowlightEdges.forEach((cn) => {
          graph.setItemState(cn, 'lowlight', false);
        });

        if (onlyRemoveStates) {
          return;
        }

        graph.getNodes().forEach((node) => {
          graph.setItemState(node, 'lowlight', true);
        });
        graph.getEdges().forEach((edge) => {
          graph.setItemState(edge, 'lowlight', true);
        });

        // Collect all nodes and edges reachable from the clicked node by
        // iterating until no new nodes or edges are found (a fixed-point loop)
        const relevantNodeIds: string[] = [];
        const relevantLoEdges: [string, string][] = [];
        const relevantHiEdges: [string, string][] = [];
        let newNodeIds: string[] = [nodeItem.getModel().id!];
        let newLoEdges: [string, string][] = [];
        let newHiEdges: [string, string][] = [];

        while (newNodeIds.length > 0 || newLoEdges.length > 0 || newHiEdges.length > 0) {
          relevantNodeIds.push(...newNodeIds);
          relevantLoEdges.push(...newLoEdges);
          relevantHiEdges.push(...newHiEdges);

          newLoEdges = graphProps.lo_edges
            .filter((edge) => relevantNodeIds.includes(edge[0].toString())
              && !relevantLoEdges.includes(edge));
          newHiEdges = graphProps.hi_edges
            .filter((edge) => relevantNodeIds.includes(edge[0].toString())
              && !relevantHiEdges.includes(edge));

          newNodeIds = newLoEdges
            .concat(newHiEdges)
            .map((edge) => edge[1].toString())
            .filter((id) => !relevantNodeIds.includes(id));
        }

        const relevantEdgeIds = relevantLoEdges
          .map(([source, target]) => `LO_${source}_${target}`)
          .concat(
            relevantHiEdges
              .map(([source, target]) => `HI_${source}_${target}`),
          );

        relevantNodeIds
          .forEach((id) => {
            graph.setItemState(id, 'lowlight', false);
            graph.setItemState(id, 'highlight', true);
          });

        relevantEdgeIds
          .forEach((id) => {
            graph.setItemState(id, 'lowlight', false);
          });

        // graph.setItemState(nodeItem, 'lowlight', false);
        // graph.setItemState(nodeItem, 'highlight', true);
        // nodeItem.getEdges().forEach((edge) => {
        //   graph.setItemState(edge, 'lowlight', false);
        // });
      });

      return () => { graph.off('node:click'); };
    },
    [graphProps],
  );

  useEffect(
    () => {
      const graph = graphRef.current!;

      const { nodes, edges } = nodesAndEdgesFromGraphProps(graphProps);

      graph.changeData({
        nodes,
        edges,
      });
    },
    [graphProps],
  );

  return (
    <>
      <div ref={ref} style={{ overflow: 'hidden' }} />
      <div style={{ padding: 4 }}>
        <span style={{ color: '#ed6c02', marginRight: 8 }}>lo edge (condition is false)</span>
        {' '}
        <span style={{ color: '#1976d2', marginRight: 8 }}>hi edge (condition is true)</span>
        {' '}
        Click nodes to highlight paths! (You can also drag and zoom.)
        <br />
        The
        {' '}
        <span style={{ color: '#4caf50' }}>Root for: X</span>
        {' '}
        labels indicate where to start looking to determine the truth value of statement X.
      </div>
    </>
  );
}

export default GraphG6;
13
frontend/src/components/loading-context.ts
Normal file
@ -0,0 +1,13 @@
import { createContext } from 'react';

interface ILoadingContext {
  loading: boolean;
  setLoading: (loading: boolean) => void;
}

const LoadingContext = createContext<ILoadingContext>({
  loading: false,
  setLoading: () => {},
});

export default LoadingContext;
58
frontend/src/components/markdown.tsx
Normal file
@ -0,0 +1,58 @@
import React from 'react';
import ReactMarkdown from 'markdown-to-jsx';
import {
  Box,
  Link,
  Typography,
} from '@mui/material';

const options = {
  overrides: {
    h1: {
      component: Typography,
      props: {
        gutterBottom: true,
        variant: 'h4',
      },
    },
    h2: {
      component: Typography,
      props: { gutterBottom: true, variant: 'h6' },
    },
    h3: {
      component: Typography,
      props: { gutterBottom: true, variant: 'subtitle1' },
    },
    h4: {
      component: Typography,
      props: {
        gutterBottom: true,
        variant: 'caption',
        paragraph: true,
      },
    },
    p: {
      component: Typography,
      props: { paragraph: true, sx: { '&:last-child': { marginBottom: 0 } } },
    },
    a: {
      component: (props: any) => (
        // eslint-disable-next-line react/jsx-props-no-spreading
        <Link target="_blank" rel="noopener noreferrer" {...props} />
      ),
    },
    li: {
      component: (props: any) => (
        <Box component="li" sx={{ mt: 1 }}>
          {/* eslint-disable-next-line react/jsx-props-no-spreading */}
          <Typography component="span" {...props} />
        </Box>
      ),
    },
  },
};

export default function Markdown(props: any) {
  // eslint-disable-next-line react/jsx-props-no-spreading
  return <ReactMarkdown options={options} {...props} />;
}
17
frontend/src/components/snackbar-context.ts
Normal file
@ -0,0 +1,17 @@
import { createContext } from 'react';

import { AlertColor } from '@mui/material';

type Status = { message: string, severity: AlertColor, potentialUserChange: boolean } | undefined;

interface ISnackbarContext {
  status: Status;
  setStatus: (status: Status) => void;
}

const SnackbarContext = createContext<ISnackbarContext>({
  status: undefined,
  setStatus: () => {},
});

export default SnackbarContext;
BIN
frontend/src/cpec-logo.png
Normal file
After Width: | Height: | Size: 47 KiB
37
frontend/src/help-texts/add-info.md
Normal file
@ -0,0 +1,37 @@
ADF-BDD.dev allows you to solve Abstract Dialectical Frameworks (ADFs). The ADFs are represented as Binary Decision Diagrams (BDDs).
The Web UI mimics many options of the CLI version of the [underlying adf-bdd tool](https://github.com/ellmau/adf-obdd). The syntax for the ADF code is identical.

In the form below, you can either type/paste your `code` or upload a file in the same format.
To put it briefly, an ADF consists of statements and acceptance conditions for these statements.
For instance, the following code indicates that `a,b,c,d` are statements, that `a` is assumed to be true (verum), `b` is true if `b` is true (which is self-supporting), `c` is true if `a` and `b` are true, and `d` is true if `b` is false.

```
s(a).
s(b).
s(c).
s(d).
ac(a,c(v)).
ac(b,b).
ac(c,and(a,b)).
ac(d,neg(b)).
```

Internally, the ADF is represented as a BDD.
The `Parsing Strategy` determines the internal implementation used for these. `Naive` uses our tool's own BDD implementation. `Hybrid` mixes our approaches with the existing Rust BDD library [`biodivine`](https://crates.io/crates/biodivine-lib-bdd). Don't be concerned about this choice if you are new to this tool; just pick either one.
You will get a view on the BDD in the detail view after you have added the problem.

You can optionally set a name for your ADF problem. Otherwise, a random name will be chosen. At the moment, the name cannot be changed later (but you could remove and re-add the problem).

We also support adding AFs in the ICCMA competition format. They are converted to ADFs internally in the obvious way; see the sketch after the example below.
For example, you can try the following code and change the option below from ADF to AF.

```
p af 5
# this is a comment
1 2
2 4
4 5
5 4
5 5
```
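For orientation, here is what that conversion would look like under the standard AF-to-ADF translation, where each argument's acceptance condition is the conjunction of the negations of its attackers. This is our reading of "the obvious way" above, so treat it as an illustrative assumption rather than a specification of the tool:

```
s(1). s(2). s(3). s(4). s(5).
ac(1,c(v)).
ac(2,neg(1)).
ac(3,c(v)).
ac(4,and(neg(2),neg(5))).
ac(5,and(neg(4),neg(5))).
```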
13
frontend/src/help-texts/detail-info.md
Normal file
@ -0,0 +1,13 @@
First of all, you can review the code that you added. You can also delete the problem if you made a mistake or do not need it anymore.

Further below, you can have a look at the BDD representations of your problem using different semantics.
In principle, each statement gets its own BDD that indicates how its truth value can be obtained from the other ones. Note that every BDD has the `BOT` and `TOP` nodes ultimately indicating the truth value (false or true respectively).
All these individual BDDs are displayed in a merged representation where the `Root for:` labels tell you where to start looking if you want to
get the truth value of an individual statement.
For instance, consider a BDD that (besides `BOT` and `TOP`) only contains a node `b` annotated with `Root for: a` and the annotation `Root for: b` at the `TOP` node.
Since the root for `b` is the `TOP` node, we know that `b` must be true. Then, to obtain the truth value for `a`, we start at the node `b` and, since we know that `b` must be true, we follow the blue edge to obtain the value for `a` (we will end up in `BOT` or `TOP` there). If `b` were false, we would follow the orange edge analogously. Note that it is not always possible to directly determine the truth values of statements (which is exactly why we need tools like this).

On the very left, you can view the initial representation of your problem after parsing. This also indicates the parsing strategy that you have chosen (`Naive` or `Hybrid`).
The other tabs allow you to solve the problem using different semantics and optimizations. Some of them (e.g. `complete`) may produce multiple models that you can cycle through.
To get a better idea of the differences, you can have a look at the [command line tool](https://github.com/ellmau/adf-obdd/tree/main/bin).
BIN
frontend/src/help-texts/example-bdd.png
Normal file
After Width: | Height: | Size: 51 KiB
BIN
frontend/src/iccl-logo.png
Normal file
After Width: | Height: | Size: 22 KiB
18
frontend/src/index.html
Normal file
@ -0,0 +1,18 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="utf-8"/>
    <meta name="viewport" content="initial-scale=1, width=device-width" />
    <title>ADF-OBDD Web Visualizer</title>
    <script type="module" src="app.tsx"></script>
  </head>
  <body>
    <noscript>
      <h1>ADF-BDD.DEV</h1>
      <p>Turn on Javascript in your browser to use our ADF tool!</p>
      <a href="./legal.html" target="_blank">Legal Information (Impressum and Data Protection Regulation)</a>
    </noscript>
    <div id="app"></div>
  </body>
</html>
BIN
frontend/src/innosale-logo.png
Normal file
After Width: | Height: | Size: 8.7 KiB
212
frontend/src/legal.html
Normal file
@ -0,0 +1,212 @@
<!doctype html>
<html>
  <head>
    <title>ADF-BDD.dev - Legal Notice</title>
    <meta
      name="description"
      content="Impressum and Data Protection Regulation for adf-bdd.dev"
    />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <style>
      body {
        font-family: Helvetica;
      }
      h1 {
        text-align: center;
      }
      section {
        max-width: 1000px;
        margin: 0 auto 32px;
        padding: 16px;
        box-shadow: 0 0 10px 0px rgba(0, 0, 0, 0.4);
      }
      section > :first-child {
        margin-top: 0;
      }
      section > :last-child {
        margin-bottom: 0;
      }
    </style>
  </head>
  <body>
    <header>
      <h1>ADF-BDD.DEV Legal Notice</h1>
    </header>

    <section>
      <h2>Impressum</h2>

      The
      <a
        href="https://tu-dresden.de/impressum?set_language=en"
        target="_blank"
        rel="noreferrer noopener"
        >Impressum of TU Dresden</a
      >
      applies with the following amendments:

      <h3>Responsibilities - Content and Technical Implementation</h3>

      <p>
        Dipl.-Inf. Lukas Gerlach<br />
        Technische Universität Dresden<br />
        Fakultät Informatik<br />
        Institut für Theoretische Informatik<br />
        Professur für Wissensbasierte Systeme<br />
        01062 Dresden<br />
        GERMANY
      </p>
      <p>
        Email: lukas.gerlach@tu-dresden.de<br />
        Phone: (+49) 351 / 463 43503
      </p>
    </section>

    <section>
      <h2>Data Protection Regulation</h2>
      <p>
        We process your personal data only in the form of metadata that is
        sent to us when you access the website. This is done to pursue
        our legitimate interest of providing and improving this publicly
        available website (https://adf-bdd.dev). To this aim, this
        metadata is also written to server log files. The data may
        contain the following personal information: public IP
        address, time of access, internet browser (e.g. user agent,
        version), operating system, referrer url, hostname of requesting
        machine. We only set cookies that are necessary for the
        provision of our service, i.e. to check if a user is logged in.
      </p>
      <h3>
        Data Processed for Website Provisioning and Log File Creation:
        Log Files for Website Provisioning
      </h3>
      <p>
        We use Cloudflare to resolve DNS requests for our website. To
        ensure the security and performance of our website, we log
        technical errors that may occur when accessing our website.
        Additionally, information that your device's browser
        automatically transmits to our server is collected. This
        information includes:
      </p>

      <ul>
        <li>IP address and operating system of your device,</li>
        <li>Browser type, version, language,</li>
        <li>
          The website from which the access was made (referrer URL),
        </li>
        <li>The status code (e.g., 404), and</li>
        <li>The transmission protocol used (e.g., http/2).</li>
      </ul>

      <p>
        The processing of this data is based on our legitimate interest
        according to Art. 6(1)(f) GDPR. Our legitimate interest lies in
        troubleshooting, optimizing, and ensuring the performance of our
        website, as well as guaranteeing the security of our network and
        systems. We do not use the data to personally identify
        individual users unless there is a legal reason to do so or
        explicit consent is obtained from you.
      </p>

      <p>
        Cloudflare acts as an intermediary between your browser and our
        server. When a DNS record is set to "Proxied," Cloudflare
        answers DNS queries with a Cloudflare Anycast IP address instead
        of the actual IP address of our server. This directs HTTP/HTTPS
        requests to the Cloudflare network, which offers advantages in
        terms of security and performance. Cloudflare also hides the IP
        address of our origin server, making it more difficult for
        attackers to directly target it.
      </p>

      <p>
        Cloudflare may store certain data related to DNS requests,
        including IP addresses. However, Cloudflare anonymizes IP
        addresses by truncating the last octets for IPv4 and the last 80
        bits for IPv6. The truncated IP addresses are deleted within 25
        hours. Cloudflare is committed to not selling or sharing users'
        personal data with third parties and not using the data for
        targeted advertising. For more information on data protection at
        Cloudflare, please see the Cloudflare Privacy Policy:
        <a href="https://www.cloudflare.com/de-de/privacypolicy/"
          >https://www.cloudflare.com/de-de/privacypolicy/</a
        >
      </p>

      <p>
        To meet the requirements of the GDPR, we have entered into a
        Data Processing Agreement (DPA) with Cloudflare, which ensures
        that Cloudflare processes the data on our behalf and in
        accordance with applicable data protection regulations. You have
        the right to access, rectify, erase, restrict processing, and
        data portability of your personal data. Please contact us if you
        wish to exercise these rights.
      </p>

      <p>
        Please note that our website is hosted on our own servers, and
        Cloudflare merely serves as a DNS provider and proxy. We
        implement appropriate technical and organizational measures to
        ensure the protection of your data.
      </p>
      <h3>Legal basis</h3>
      <p>
        The legal basis for the data processing is
        <a
          href="https://gdpr.eu/article-6-how-to-process-personal-data-legally/"
          target="_blank"
          rel="noreferrer noopener"
          >Section §6 para.1 lit. f GDPR</a
        >.
      </p>
      <h3>Rights of data subjects</h3>
      <ul>
        <li>
          You have the right to obtain information from TU Dresden
          about the data stored about your person and/or to have
          incorrectly stored data corrected.
        </li>
        <li>
          You have the right to erasure or restriction of the
          processing and/or a right to object to the processing.
        </li>
        <li>
          You can contact TU Dresden's Data Protection Officer at any
          time.
          <p>
            Tel.: +49 351 / 463 32839<br />
            Fax: +49 351 / 463 39718<br />
            Email: informationssicherheit@tu-dresden.de<br />
            <a
              href="https://tu-dresden.de/informationssicherheit"
              target="_blank"
              rel="noreferrer noopener"
              >https://tu-dresden.de/informationssicherheit</a
            >
          </p>
        </li>
        <li>
          You also have the right to complain to a supervisory
          authority if you are concerned that the processing of your
          personal data is an infringement of the law. The competent
          supervisory authority for data protection is:
          <p>
            Saxon Data Protection Commissioner<br />
            Ms. Dr. Juliane Hundert<br />
            Maternistraße 17<br />
            01067 Dresden<br />
            Email: post@sdtb.sachsen.de<br />
            Phone: + 49 351 / 85471 101<br />
            <a
              href="http://www.datenschutz.sachsen.de"
              target="_blank"
              rel="noreferrer noopener"
              >www.datenschutz.sachsen.de</a
            >
          </p>
        </li>
      </ul>
    </section>
  </body>
</html>
BIN
frontend/src/scads-logo.png
Normal file
After Width: | Height: | Size: 20 KiB
BIN
frontend/src/secai-logo.png
Normal file
After Width: | Height: | Size: 45 KiB
19
frontend/src/test-data.ts
Normal file
@ -0,0 +1,19 @@
// Each entry describes one BDD node: `lo` and `hi` are the indices of the
// successor nodes for the node's condition being false or true respectively;
// entries 0 and 1 are the BOT and TOP sinks.
const testData = [
  { label: 'BOT', lo: 0, hi: 0 },
  { label: 'TOP', lo: 1, hi: 1 },
  { label: 'Var8', lo: 0, hi: 1 },
  { label: 'Var7', lo: 1, hi: 0 },
  { label: 'Var0', lo: 3, hi: 1 },
  { label: 'Var9', lo: 0, hi: 1 },
  { label: 'Var8', lo: 5, hi: 0 },
  { label: 'Var0', lo: 6, hi: 5 },
  { label: 'Var1', lo: 0, hi: 1 },
  { label: 'Var0', lo: 1, hi: 0 },
  { label: 'Var9', lo: 1, hi: 0 },
  { label: 'Var8', lo: 0, hi: 10 },
  { label: 'Var0', lo: 5, hi: 0 },
  { label: 'Var8', lo: 1, hi: 0 },
  { label: 'Var5', lo: 13, hi: 0 },
];

export default testData;
BIN
frontend/src/tud-logo.png
Normal file
After Width: | Height: | Size: 20 KiB
103
frontend/tsconfig.json
Normal file
@ -0,0 +1,103 @@
{
  "compilerOptions": {
    /* Visit https://aka.ms/tsconfig to read more about this file */

    /* Projects */
    // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
    // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
    // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
    // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
    // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
    // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */

    /* Language and Environment */
    "target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
    // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
    "jsx": "preserve", /* Specify what JSX code is generated. */
    // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */
    // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
    // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
    // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
    // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
    // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
    // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
    // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
    // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */

    /* Modules */
    "module": "esnext", /* Specify what module code is generated. */
    // "rootDir": "./", /* Specify the root folder within your source files. */
    "moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */
    // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
    // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
    // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
    // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
    // "types": [], /* Specify type package names to be included without being referenced in a source file. */
    // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
    // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
    // "resolveJsonModule": true, /* Enable importing .json files. */
    // "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */

    /* JavaScript Support */
    // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
    // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
    // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */

    /* Emit */
    // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
    // "declarationMap": true, /* Create sourcemaps for d.ts files. */
    // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
    // "sourceMap": true, /* Create source map files for emitted JavaScript files. */
    // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
    // "outDir": "./", /* Specify an output folder for all emitted files. */
    // "removeComments": true, /* Disable emitting comments. */
    // "noEmit": true, /* Disable emitting files from a compilation. */
    // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
    // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */
    // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
    // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
    // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
    // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
    // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
    // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
    // "newLine": "crlf", /* Set the newline character for emitting files. */
    // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
    // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
    // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
    // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
    // "declarationDir": "./", /* Specify the output directory for generated declaration files. */
    // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */

    /* Interop Constraints */
    // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
    // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
    "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
    // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
    "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */

    /* Type Checking */
    "strict": true, /* Enable all strict type-checking options. */
    // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
    // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */
    // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
    // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
    // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
    // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */
    // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */
    // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
    // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */
    // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */
    // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
    // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
    // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
    // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */
    // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
    // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */
    // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
    // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */

    /* Completeness */
    // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
    "skipLibCheck": true /* Skip type checking all .d.ts files. */
  }
}
4163
frontend/yarn.lock
Normal file
@ -1,10 +1,11 @@
 [package]
 name = "adf_bdd"
-version = "0.2.2"
+version = "0.3.1"
 authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>"]
 edition = "2021"
+homepage = "https://ellmau.github.io/adf-obdd/"
 repository = "https://github.com/ellmau/adf-obdd/"
-license = "GPL-3.0-only"
+license = "MIT"
 exclude = ["res/", "./flake*", "flake.lock", "*.nix", ".envrc", "_config.yml", "tarpaulin-report.*", "*~"]

 description = "Library to solve grounded, complete, and stable ADF-semantics by utilising OBDDs - ordered binary decision diagrams"
@ -23,22 +24,29 @@ crate-type = ["lib"] # The crate types to generate.

 [dependencies]
 log = { version = "0.4"}
-nom = "7.1.0"
+nom = "7.1.3"
 lexical-sort = "0.3.1"
 serde = { version = "1.0", features = ["derive","rc"] }
 serde_json = "1.0"
-biodivine-lib-bdd = "0.3.0"
+biodivine-lib-bdd = "0.5.0"
 derivative = "2.2.0"
+roaring = "0.10.1"
+strum = { version = "0.24", features = ["derive"] }
+crossbeam-channel = "0.5"
+rand = {version = "0.8.5", features = ["std_rng"]}

 [dev-dependencies]
 test-log = "0.2"
-env_logger = "0.9"
+env_logger = "0.10"
 quickcheck = "1"
 quickcheck_macros = "1"

 [features]
-default = ["adhoccounting", "variablelist" ]
+default = ["adhoccounting", "variablelist", "frontend" ]
-adhoccounting = [] # count models ad-hoc - disable if counting is not needed
+adhoccounting = [] # count paths ad-hoc - disable if counting is not needed
 importexport = []
 variablelist = [ "HashSet" ]
 HashSet = []
+adhoccountmodels = [ "adhoccounting" ] # count models as well as paths ad-hoc note that facet methods will need this feature too
+benchmark = ["adhoccounting", "variablelist"] # set of features for speed benchmarks
+frontend = []
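For downstream users of the crate, the reworked feature list above can be tailored per dependency. A minimal sketch of a consumer-side `Cargo.toml` entry; the version and feature names are taken from the diff above, while the dependency block itself is illustrative:

```toml
[dependencies]
# Opt out of the new "frontend" default feature but keep ad-hoc counting;
# "variablelist" transitively enables "HashSet" as declared above.
adf_bdd = { version = "0.3.1", default-features = false, features = ["adhoccounting", "variablelist"] }
```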
@ -1 +0,0 @@
-../LICENSE
21
lib/LICENSE
Normal file
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022 Stefan Ellmauthaler

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -1 +0,0 @@
-../README.md
168
lib/README.md
Normal file
@ -0,0 +1,168 @@
[crates.io](https://crates.io/crates/adf_bdd)
[docs.rs](https://docs.rs/adf_bdd/latest/adf_bdd/)
[coverage](https://coveralls.io/github/ellmau/adf-obdd)
[releases](https://github.com/ellmau/adf-obdd/releases)
[discussions](https://github.com/ellmau/adf-obdd/discussions)

# Abstract Dialectical Frameworks solved by Binary Decision Diagrams; developed in Dresden (ADF-BDD)
This library contains an efficient representation of Abstract Dialectical Frameworks (ADF) by utilising an implementation of Ordered Binary Decision Diagrams (OBDD).

## Abstract Dialectical Frameworks

An abstract dialectical framework consists of abstract statements. Each statement has a unique label and might be related to other statements (s) in the ADF. This relation is defined by a so-called acceptance condition (ac), which intuitively is a propositional formula, where the variable symbols are the labels of the statements. An interpretation is a three-valued function which maps each statement to a truth value (true, false, undecided). We call such an interpretation a model if each acceptance condition agrees with the interpretation.
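As a small concrete instance of these notions (written in the input syntax defined below), consider the two-statement ADF

```plain
s(a).
s(b).
ac(a,c(v)).
ac(b,neg(a)).
```

Here the interpretation that maps `a` to true and `b` to false is a model: the acceptance condition of `a` is verum, and the condition of `b` evaluates to false precisely because `a` is true.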
### Noteworthy relations between semantics

The following relations between the semantics can be identified easily (the last one is illustrated by the sketch after this list):

* The computation is always done in the same order:
  * grd
  * com
  * stm
* We know that there is always exactly one grounded model.
* We know that there always exists at least one complete model (i.e. the grounded one).
* We know that a stable model does not need to exist.
* We know that every stable model is a complete model too.
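A minimal sketch of how that last relation can be exercised against the library. This assumes a `stable()` iterator on `Adf` analogous to the `complete()` iterator used in the usage examples below; the exact method names and signatures should be checked against the API documentation:

```rust
use adf_bdd::adf::Adf;
use adf_bdd::parser::AdfParser;

let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,c(v)).ac(b,neg(a)).").expect("example parses");
let mut adf = Adf::from_parser(&parser);

// Every stable model must also appear among the complete models.
let complete: Vec<_> = adf.complete().collect();
for stable_model in adf.stable() {
    assert!(complete.contains(&stable_model));
}
```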

## Ordered Binary Decision Diagram

An ordered binary decision diagram is a normalised representation of binary functions, where satisfiability and validity checks can be done relatively cheaply.

Note that one advantage of this implementation is that only one OBDD is used for all acceptance conditions. This can be done because all of them have the identical signature (i.e. the set of all statements + top and bottom concepts). Due to this uniform representation, reductions on subformulae which are shared by two or more statements only need to be computed once and are already cached in the data structure for further applications.

The algorithm used to create a BDD from a given formula does not perform well on bigger formulae; therefore it is possible to use a state-of-the-art library to instantiate the BDD (https://github.com/sybila/biodivine-lib-bdd). It is possible to either stay with the biodivine library or switch back to the variant implemented by adf-bdd (see the usage examples below). The variant implemented in this library offers reuse of already-done reductions and memoisation techniques, which are not offered by biodivine. In addition, some further features, like counter-model counting, are not supported by biodivine.

Note that import and export only work if the naive library is chosen.

## Input-file format:

Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement. The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
```plain
and(x,y): conjunction
or(x,y): disjunction
iff(x,y): if and only if
xor(x,y): exclusive or
neg(x): classical negation
c(v): constant symbol "verum" - tautology/top
c(f): constant symbol "falsum" - inconsistency/bot
```

### Example input file:
```plain
s(a).
s(b).
s(c).
s(d).

ac(a,c(v)).
ac(b,or(a,b)).
ac(c,neg(b)).
ac(d,d).
```

## Usage examples

First parse a given ADF and sort the statements, if needed.

```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
// use the above example as input
let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
let parser = AdfParser::default();
match parser.parse()(&input) {
    Ok(_) => log::info!("[Done] parsing"),
    Err(e) => {
        log::error!(
            "Error during parsing:\n{} \n\n cannot continue, panic!",
            e
        );
        panic!("Parsing failed, see log for further details")
    }
}
// sort lexicographic
parser.varsort_lexi();
```
Use the naive/in-crate implementation:

```rust
// create Adf
let mut adf = Adf::from_parser(&parser);
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
    print!("{}", printer.print_interpretation(&model));
}
```
Use the biodivine implementation:
```rust
// create Adf
let adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser);
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
    print!("{}", printer.print_interpretation(&model));
}
```
Use the hybrid approach implementation:
```rust
// create biodivine Adf
let badf = adf_bdd::adfbiodivine::Adf::from_parser(&parser);
// instantiate the internally used adf after the reduction done by biodivine
let mut adf = badf.hybrid_step();
// compute and print the complete models
let printer = adf.print_dictionary();
for model in adf.complete() {
    print!("{}", printer.print_interpretation(&model));
}
```

Use the new `NoGood`-based algorithm and utilise the new interface with channels:
```rust
use adf_bdd::parser::AdfParser;
use adf_bdd::adf::Adf;
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::datatypes::{Term, adf::VarContainer};
// create a channel
let (s, r) = crossbeam_channel::unbounded();
let variables = VarContainer::default();
let variables_worker = variables.clone();
// spawn a solver thread
let solving = std::thread::spawn(move || {
    // use the above example as input
    let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
    let parser = AdfParser::with_var_container(variables_worker);
    parser.parse()(&input).expect("parsing worked well");
    // use hybrid approach
    let mut adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser).hybrid_step();
    // compute stable with the simple heuristic
    adf.stable_nogood_channel(Heuristic::Simple, s);
});
let printer = variables.print_dictionary();
// print results as they are computed
while let Ok(result) = r.recv() {
    print!("stable model: {:?} \n", result);
    // use dictionary
    print!("stable model with variable names: {}", printer.print_interpretation(&result));
    # assert_eq!(result, vec![Term(1),Term(1),Term(0),Term(0)]);
}
// waiting for the other thread to close
solving.join().unwrap();
```

# Acknowledgements
This work is partly supported by Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) in project number 389792660 (TRR 248, [Center for Perspicuous Systems](https://www.perspicuous-computing.science/)),
the Bundesministerium für Bildung und Forschung (BMBF, Federal Ministry of Education and Research) in the
[Center for Scalable Data Analytics and Artificial Intelligence](https://www.scads.de) (ScaDS.AI),
and by the [Center for Advancing Electronics Dresden](https://cfaed.tu-dresden.de) (cfaed).

# Affiliation
This work has been partly developed by the [Knowledge-Based Systems Group](http://kbs.inf.tu-dresden.de/), [Faculty of Computer Science](https://tu-dresden.de/ing/informatik) of [TU Dresden](https://tu-dresden.de).

# Disclaimer
Hosting content here does not establish any formal or legal relation to TU Dresden.
@@ -15,7 +15,7 @@ fn main() {
 fn gen_tests() {
     let out_dir = env::var("OUT_DIR").unwrap();
     let destination = Path::new(&out_dir).join("tests.rs");
-    let mut test_file = File::create(&destination).unwrap();
+    let mut test_file = File::create(destination).unwrap();

     if let Ok(test_data_directory) = read_dir("../res/adf-instances/instances/") {
         // write test file header, put `use`, `const` etc there
672 lib/src/adf.rs

@@ -1,11 +1,12 @@
 /*!
-This module describes the abstract dialectical framework
+This module describes the abstract dialectical framework.

-- computing interpretations
+- computing interpretations and models
 - computing fixpoints
 */

-use serde::{Deserialize, Serialize};
+pub mod heuristics;
+use std::cell::RefCell;

 use crate::{
     datatypes::{
@@ -15,18 +16,28 @@ use crate::{
         },
         FacetCounts, ModelCounts, Term, Var,
     },
+    nogoods::{NoGood, NoGoodStore},
     obdd::Bdd,
     parser::{AdfParser, Formula},
 };
+use rand::{rngs::StdRng, SeedableRng};
+use serde::{Deserialize, Serialize};
+
+use self::heuristics::Heuristic;

 #[derive(Serialize, Deserialize, Debug)]
-/// Representation of an ADF, with an ordering and dictionary of statement <-> number relations, a binary decision diagram, and a list of acceptance functions in Term representation.
+/// Representation of an ADF, with an ordering and dictionary which relates statements to numbers, a binary decision diagram, and a list of acceptance conditions in [`Term`][crate::datatypes::Term] representation.
 ///
 /// Please note that due to the nature of the underlying reduced and ordered Bdd the concept of a [`Term`][crate::datatypes::Term] represents one (sub) formula as well as truth-values.
 pub struct Adf {
-    ordering: VarContainer,
-    bdd: Bdd,
-    ac: Vec<Term>,
+    /// The ordering of the variables in the ADF, including a dictionary for the statements
+    pub ordering: VarContainer,
+    /// The underlying binary decision diagram that represents the ADF
+    pub bdd: Bdd,
+    /// Acceptance conditions for the ADF
+    pub ac: Vec<Term>,
+    #[serde(skip, default = "Adf::default_rng")]
+    rng: RefCell<StdRng>,
 }

 impl Default for Adf {
@@ -35,28 +46,36 @@ impl Default for Adf {
             ordering: VarContainer::default(),
             bdd: Bdd::new(),
             ac: Vec::new(),
+            rng: Adf::default_rng(),
+        }
+    }
+}
+
+impl From<(VarContainer, Bdd, Vec<Term>)> for Adf {
+    fn from(source: (VarContainer, Bdd, Vec<Term>)) -> Self {
+        Self {
+            ordering: source.0,
+            bdd: source.1,
+            ac: source.2,
+            rng: Self::default_rng(),
         }
     }
 }

 impl Adf {
-    /// Instantiates a new ADF, based on the parser-data
+    /// Instantiates a new ADF, based on the [parser-data][crate::parser::AdfParser].
     pub fn from_parser(parser: &AdfParser) -> Self {
         log::info!("[Start] instantiating BDD");
         let mut result = Self {
-            ordering: VarContainer::from_parser(
-                parser.namelist_rc_refcell(),
-                parser.dict_rc_refcell(),
-            ),
+            ordering: parser.var_container(),
             bdd: Bdd::new(),
-            ac: vec![Term(0); parser.namelist_rc_refcell().as_ref().borrow().len()],
+            ac: vec![Term(0); parser.dict_size()],
+            rng: Adf::default_rng(),
         };
-        (0..parser.namelist_rc_refcell().borrow().len())
-            .into_iter()
-            .for_each(|value| {
-                log::trace!("adding variable {}", Var(value));
-                result.bdd.variable(Var(value));
-            });
+        (0..parser.dict_size()).for_each(|value| {
+            log::trace!("adding variable {}", Var(value));
+            result.bdd.variable(Var(value));
+        });
         log::debug!("[Start] adding acs");
         parser
             .formula_order()
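Note: the new `From<(VarContainer, Bdd, Vec<Term>)>` conversion above makes it possible to assemble an `Adf` from previously exported parts. A minimal sketch, assuming `VarContainer::default`, `Bdd::new`, and the `obdd` module are reachable through the crate's public API:

```rust
use adf_bdd::adf::Adf;
use adf_bdd::datatypes::adf::VarContainer;
use adf_bdd::datatypes::Term;
use adf_bdd::obdd::Bdd;

// Assemble an Adf from its three components; the serde-skipped `rng`
// field is filled in by the From impl via Adf::default_rng().
let parts: (VarContainer, Bdd, Vec<Term>) = (
    VarContainer::default(), // empty statement dictionary, illustration only
    Bdd::new(),              // fresh decision diagram
    Vec::new(),              // no acceptance conditions yet
);
let adf: Adf = parts.into();
```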
@@ -84,9 +103,10 @@ impl Adf {
         bio_ac: &[biodivine_lib_bdd::Bdd],
     ) -> Self {
         let mut result = Self {
-            ordering: VarContainer::copy(ordering),
+            ordering: ordering.clone(),
             bdd: Bdd::new(),
             ac: vec![Term(0); bio_ac.len()],
+            rng: Adf::default_rng(),
         };
         result
             .ac
@@ -131,10 +151,21 @@ impl Adf {
                 }
             }
         });
+        log::trace!("ordering: {:?}", result.ordering);
+        log::trace!("adf {:?} instantiated with bdd {}", result.ac, result.bdd);
         result
     }

-    /// Instantiates a new ADF, based on a biodivine adf
+    fn default_rng() -> RefCell<StdRng> {
+        RefCell::new(StdRng::from_entropy())
+    }
+
+    /// Sets a cryptographically strong seed
+    pub fn seed(&mut self, seed: [u8; 32]) {
+        self.rng = RefCell::new(StdRng::from_seed(seed))
+    }
+
+    /// Instantiates a new ADF, based on a [biodivine adf][crate::adfbiodivine::Adf].
     pub fn from_biodivine(bio_adf: &super::adfbiodivine::Adf) -> Self {
         Self::from_biodivine_vector(bio_adf.var_container(), bio_adf.ac())
     }
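The `seed` setter above makes the randomised parts of the solver reproducible. A small usage sketch (the two-statement formula and the all-42 seed are made up for illustration):

```rust
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::adf::Adf;
use adf_bdd::parser::AdfParser;

let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").expect("parsing should work");
let mut adf = Adf::from_parser(&parser);
// Fix the rng so that Heuristic::Rand makes its choices in a stable order.
adf.seed([42u8; 32]);
let models: Vec<_> = adf.stable_nogood(Heuristic::Rand).collect();
assert_eq!(models.len(), 2); // enumeration is exhaustive, so both models appear
```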
@@ -179,7 +210,7 @@ impl Adf {
         }
     }

-    /// Computes the grounded extension and returns it as a list
+    /// Computes the grounded extension and returns it as a list.
     pub fn grounded(&mut self) -> Vec<Term> {
         log::info!("[Start] grounded");
         let ac = &self.ac.clone();
@@ -351,14 +382,32 @@ impl Adf {

     /// Computes the stable models.
     /// Returns an iterator which contains all stable models.
-    /// This variant uses the computation of model and counter-model counts.
-    pub fn stable_count_optimisation<'a, 'c>(&'a mut self) -> impl Iterator<Item = Vec<Term>> + 'c
+    /// This variant uses a heuristic which combines maximal [var impact][crate::obdd::Bdd::passive_var_impact], minimal [self-cycle impact][crate::obdd::Bdd::active_var_impact], and the minimal number of [paths][crate::obdd::Bdd::paths].
+    pub fn stable_count_optimisation_heu_a<'a, 'c>(
+        &'a mut self,
+    ) -> impl Iterator<Item = Vec<Term>> + 'c
     where
         'a: 'c,
     {
         log::debug!("[Start] stable count optimisation");
         let grounded = self.grounded();
-        self.two_val_model_counts(&grounded)
+        self.two_val_model_counts(&grounded, Self::heu_max_imp_min_nacyc_impact_min_paths)
+            .into_iter()
+            .filter(|int| self.stability_check(int))
+    }
+
+    /// Computes the stable models.
+    /// Returns an iterator which contains all stable models.
+    /// This variant uses a heuristic which combines the minimal number of [paths][crate::obdd::Bdd::paths] and maximal [variable-impact][crate::obdd::Bdd::passive_var_impact].
+    pub fn stable_count_optimisation_heu_b<'a, 'c>(
+        &'a mut self,
+    ) -> impl Iterator<Item = Vec<Term>> + 'c
+    where
+        'a: 'c,
+    {
+        log::debug!("[Start] stable count optimisation");
+        let grounded = self.grounded();
+        self.two_val_model_counts(&grounded, Self::heu_min_paths_max_imp)
             .into_iter()
             .filter(|int| self.stability_check(int))
     }
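Both new entry points enumerate the same set of stable models and differ only in the variable-selection heuristic, so callers can pick whichever performs better on their instances. A quick sketch (the input formula is illustrative):

```rust
use adf_bdd::adf::Adf;
use adf_bdd::parser::AdfParser;

let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").expect("parsing should work");
let mut adf = Adf::from_parser(&parser);
// Heuristic A: variable impact first, then self-cycle impact, then path counts.
let via_a: Vec<_> = adf.stable_count_optimisation_heu_a().collect();
// Heuristic B: number of paths first, then variable impact.
let via_b: Vec<_> = adf.stable_count_optimisation_heu_b().collect();
// The model sets agree; only the enumeration order may differ.
assert_eq!(via_a.len(), via_b.len());
```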
@@ -387,21 +436,91 @@ impl Adf {
         true
     }

-    fn two_val_model_counts(&mut self, interpr: &[Term]) -> Vec<Vec<Term>> {
-        log::trace!("two_val_model_counts({:?}) called ", interpr);
+    fn is_two_valued(&self, interpretation: &[Term]) -> bool {
+        interpretation.iter().all(|t| t.is_truth_value())
+    }
+
+    fn two_val_model_counts<H>(&mut self, interpr: &[Term], heuristic: H) -> Vec<Vec<Term>>
+    where
+        H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy,
+    {
+        self.two_val_model_counts_logic(interpr, &vec![Term::UND; interpr.len()], 0, heuristic)
+    }
+
+    fn heu_max_imp_min_nacyc_impact_min_paths(
+        &self,
+        lhs: (Var, Term),
+        rhs: (Var, Term),
+        interpr: &[Term],
+    ) -> std::cmp::Ordering {
+        match self
+            .bdd
+            .passive_var_impact(rhs.0, interpr)
+            .cmp(&self.bdd.passive_var_impact(lhs.0, interpr))
+        {
+            std::cmp::Ordering::Equal => match self
+                .bdd
+                .active_var_impact(lhs.0, interpr)
+                .cmp(&self.bdd.active_var_impact(rhs.0, interpr))
+            {
+                std::cmp::Ordering::Equal => self
+                    .bdd
+                    .paths(lhs.1, true)
+                    .minimum()
+                    .cmp(&self.bdd.paths(rhs.1, true).minimum()),
+                value => value,
+            },
+            value => value,
+        }
+    }
+
+    fn heu_min_paths_max_imp(
+        &self,
+        lhs: (Var, Term),
+        rhs: (Var, Term),
+        interpr: &[Term],
+    ) -> std::cmp::Ordering {
+        match self
+            .bdd
+            .paths(lhs.1, true)
+            .minimum()
+            .cmp(&self.bdd.paths(rhs.1, true).minimum())
+        {
+            std::cmp::Ordering::Equal => self
+                .bdd
+                .passive_var_impact(rhs.0, interpr)
+                .cmp(&self.bdd.passive_var_impact(lhs.0, interpr)),
+            value => value,
+        }
+    }
+
+    fn two_val_model_counts_logic<H>(
+        &mut self,
+        interpr: &[Term],
+        will_be: &[Term],
+        depth: usize,
+        heuristic: H,
+    ) -> Vec<Vec<Term>>
+    where
+        H: Fn(&Self, (Var, Term), (Var, Term), &[Term]) -> std::cmp::Ordering + Copy,
+    {
+        log::debug!("two_val_model_recursion_depth: {}/{}", depth, interpr.len());
         if let Some((idx, ac)) = interpr
             .iter()
             .enumerate()
-            .filter(|(_idx, val)| !val.is_truth_value())
-            .min_by(|(_idx_a, val_a), (_idx_b, val_b)| {
-                self.bdd
-                    .models(**val_a, true)
-                    .minimum()
-                    .cmp(&self.bdd.models(**val_b, true).minimum())
+            .filter(|(idx, val)| !(val.is_truth_value() || will_be[*idx].is_truth_value()))
+            .min_by(|(idx_a, val_a), (idx_b, val_b)| {
+                heuristic(
+                    self,
+                    (Var(*idx_a), **val_a),
+                    (Var(*idx_b), **val_b),
+                    interpr,
+                )
             })
         {
             let mut result = Vec::new();
-            let check_models = !self.bdd.models(*ac, true).more_models();
+            let check_models = !self.bdd.paths(*ac, true).more_models();
             log::trace!(
                 "Identified Var({}) with ac {:?} to be {}",
                 idx,
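The two comparators above follow the usual lexicographic pattern: order by a primary measure and consult the tie-breaker only on `std::cmp::Ordering::Equal`. The nested `match` is equivalent to chaining with `Ordering::then_with`, as this small stand-alone sketch (with made-up measures in place of var impact and path counts) shows:

```rust
use std::cmp::Ordering;

// Stand-ins for the var-impact and path-count measures of the real code.
fn compare(primary_a: usize, primary_b: usize, tie_a: usize, tie_b: usize) -> Ordering {
    // Equivalent to: match primary.cmp(..) { Equal => tie.cmp(..), value => value }
    primary_a.cmp(&primary_b).then_with(|| tie_a.cmp(&tie_b))
}

assert_eq!(compare(1, 1, 2, 3), Ordering::Less);    // tie-breaker decides
assert_eq!(compare(2, 1, 0, 9), Ordering::Greater); // primary decides
```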
@@ -417,15 +536,16 @@ impl Adf {
             let res = negative
                 .iter()
                 .try_for_each(|var| {
-                    if new_int[var.value()].is_true() {
+                    if new_int[var.value()].is_true() || will_be[var.value()] == Term::TOP {
                         return Err(());
                     }
                     new_int[var.value()] = Term::BOT;
                     Ok(())
                 })
                 .and(positive.iter().try_for_each(|var| {
-                    if new_int[var.value()].is_truth_value()
-                        && !new_int[var.value()].is_true()
+                    if (new_int[var.value()].is_truth_value()
+                        && !new_int[var.value()].is_true())
+                        || will_be[var.value()] == Term::BOT
                     {
                         return Err(());
                     }
@@ -434,37 +554,103 @@ impl Adf {
                 }));
                 if res.is_ok() {
                     new_int[idx] = if check_models { Term::TOP } else { Term::BOT };
-                    let upd_int = self.update_interpretation(&new_int);
-                    result.append(&mut self.two_val_model_counts(&upd_int));
+                    let upd_int = self.update_interpretation_fixpoint(&new_int);
+                    if self.check_consistency(&upd_int, will_be) {
+                        result.append(&mut self.two_val_model_counts_logic(
+                            &upd_int,
+                            will_be,
+                            depth + 1,
+                            heuristic,
+                        ));
+                    }
                 }
                 res
             });
+            log::trace!("results found so far:{}", result.len());
             // checked one alternative, we can now conclude that only the other option may work
-            log::trace!("checked one alternative, concluding the other value");
+            log::debug!("checked one alternative, concluding the other value");
             let new_int = interpr
                 .iter()
                 .map(|tree| self.bdd.restrict(*tree, Var(idx), !check_models))
                 .collect::<Vec<Term>>();
-            let mut upd_int = self.update_interpretation(&new_int);
-            // TODO: should be "must be true/false" instead of setting it to TOP/BOT and will need sanity checks at every iteration
+            let mut upd_int = self.update_interpretation_fixpoint(&new_int);
             log::trace!("\nnew_int {new_int:?}\nupd_int {upd_int:?}");
-            if new_int[idx].no_inf_decrease(&upd_int[idx]) {
+            if new_int[idx].no_inf_inconsistency(&upd_int[idx]) {
                 upd_int[idx] = if check_models { Term::BOT } else { Term::TOP };
-                if new_int[idx].no_inf_decrease(&upd_int[idx]) {
-                    result.append(&mut self.two_val_model_counts(&upd_int));
+                if new_int[idx].no_inf_inconsistency(&upd_int[idx]) {
+                    let mut must_be_new = will_be.to_vec();
+                    must_be_new[idx] = new_int[idx];
+                    result.append(&mut self.two_val_model_counts_logic(
+                        &upd_int,
+                        &must_be_new,
+                        depth + 1,
+                        heuristic,
+                    ));
                 }
             }
             result
         } else {
             // filter has created empty iterator
-            vec![interpr.to_vec()]
+            let concluded = interpr
+                .iter()
+                .enumerate()
+                .map(|(idx, val)| {
+                    if !val.is_truth_value() {
+                        will_be[idx]
+                    } else {
+                        *val
+                    }
+                })
+                .collect::<Vec<Term>>();
+            let ac = self.ac.clone();
+            let result = self.apply_interpretation(&ac, &concluded);
+            if self.check_consistency(&result, &concluded) {
+                vec![result]
+            } else {
+                vec![interpr.to_vec()]
+            }
+        }
+    }
+
+    fn update_interpretation_fixpoint(&mut self, interpretation: &[Term]) -> Vec<Term> {
+        let mut cur_int = interpretation.to_vec();
+        loop {
+            let new_int = self.update_interpretation(interpretation);
+            if cur_int == new_int {
+                return cur_int;
+            } else {
+                cur_int = new_int;
+            }
+        }
+    }
+
+    /// Constructs the fixpoint of the given interpretation with respect to the ADF.
+    /// Sets _update_ to [`true`] if the value has been updated and to [`false`] otherwise.
+    fn update_interpretation_fixpoint_upd(
+        &mut self,
+        interpretation: &[Term],
+        update: &mut bool,
+    ) -> Vec<Term> {
+        let mut cur_int = interpretation.to_vec();
+        *update = false;
+        loop {
+            let new_int = self.update_interpretation(interpretation);
+            if cur_int == new_int {
+                return cur_int;
+            } else {
+                cur_int = new_int;
+                *update = true;
+            }
         }
     }

     fn update_interpretation(&mut self, interpretation: &[Term]) -> Vec<Term> {
-        interpretation
-            .iter()
+        self.apply_interpretation(interpretation, interpretation)
+    }
+
+    fn apply_interpretation(&mut self, ac: &[Term], interpretation: &[Term]) -> Vec<Term> {
+        ac.iter()
             .map(|ac| {
                 interpretation
                     .iter()
@@ -480,6 +666,13 @@ impl Adf {
                 .collect::<Vec<Term>>()
     }

+    fn check_consistency(&mut self, interpretation: &[Term], will_be: &[Term]) -> bool {
+        interpretation
+            .iter()
+            .zip(will_be.iter())
+            .all(|(int, wb)| wb.no_inf_inconsistency(int))
+    }
+
     /// Computes the complete models
     /// Returns an Iterator which contains all complete models
     pub fn complete<'a, 'c>(&'a mut self) -> impl Iterator<Item = Vec<Term>> + 'c
@@ -515,7 +708,7 @@ impl Adf {
             .collect()
     }

-    /// creates a [PrintableInterpretation] for output purposes
+    /// Creates a [PrintableInterpretation] for output purposes.
     pub fn print_interpretation<'a, 'b>(
         &'a self,
         interpretation: &'b [Term],
@@ -526,12 +719,12 @@ impl Adf {
         PrintableInterpretation::new(interpretation, &self.ordering)
     }

-    /// creates a [PrintDictionary] for output purposes
+    /// Creates a [PrintDictionary] for output purposes.
     pub fn print_dictionary(&self) -> PrintDictionary {
         PrintDictionary::new(&self.ordering)
     }

-    /// Fixes the bdd after an import with serde
+    /// Fixes the bdd after an import with serde.
     pub fn fix_import(&mut self) {
         self.bdd.fix_import();
     }
@@ -543,7 +736,7 @@ impl Adf {
         interpretation
             .iter()
             .map(|t| {
-                let mcs = self.bdd.models(*t, true);
+                let mcs = self.bdd.models(*t, false);

                 let n_vdps = { |t| self.bdd.var_dependencies(t).len() };

@@ -559,11 +752,193 @@ impl Adf {
             })
             .collect::<Vec<_>>()
     }
+
+    /// Computes the stable extensions of a given [`Adf`], using the [`NoGood`]-learner.
+    pub fn stable_nogood<'a, 'c>(
+        &'a mut self,
+        heuristic: Heuristic,
+    ) -> impl Iterator<Item = Vec<Term>> + 'c
+    where
+        'a: 'c,
+    {
+        let grounded = self.grounded();
+        let heu = heuristic.get_heuristic();
+        let (s, r) = crossbeam_channel::unbounded::<Vec<Term>>();
+        self.stable_nogood_get_vec(&grounded, heu, s, r).into_iter()
+    }
+
+    /// Computes the stable extension of a given [`Adf`], using the [`NoGood`]-learner.
+    /// Needs a [`Sender`][crossbeam_channel::Sender<Vec<crate::datatypes::Term>>] where the results of the computation can be put to.
+    pub fn stable_nogood_channel(
+        &mut self,
+        heuristic: Heuristic,
+        sender: crossbeam_channel::Sender<Vec<Term>>,
+    ) {
+        let grounded = self.grounded();
+        self.nogood_internal(
+            &grounded,
+            heuristic.get_heuristic(),
+            Self::stability_check,
+            sender,
+        );
+    }
+
+    /// Computes the two valued extension of a given [`Adf`], using the [`NoGood`]-learner.
+    /// Needs a [`Sender`][crossbeam_channel::Sender<Vec<crate::datatypes::Term>>] where the results of the computation can be put to.
+    pub fn two_val_nogood_channel(
+        &mut self,
+        heuristic: Heuristic,
+        sender: crossbeam_channel::Sender<Vec<Term>>,
+    ) {
+        let grounded = self.grounded();
+        self.nogood_internal(
+            &grounded,
+            heuristic.get_heuristic(),
+            |_self: &mut Self, _int: &[Term]| true,
+            sender,
+        )
+    }
+
+    fn stable_nogood_get_vec<H>(
+        &mut self,
+        interpretation: &[Term],
+        heuristic: H,
+        s: crossbeam_channel::Sender<Vec<Term>>,
+        r: crossbeam_channel::Receiver<Vec<Term>>,
+    ) -> Vec<Vec<Term>>
+    where
+        H: Fn(&Self, &[Term]) -> Option<(Var, Term)>,
+    {
+        self.nogood_internal(interpretation, heuristic, Self::stability_check, s);
+        r.iter().collect()
+    }
+
+    fn nogood_internal<H, I>(
+        &mut self,
+        interpretation: &[Term],
+        heuristic: H,
+        stability_check: I,
+        s: crossbeam_channel::Sender<Vec<Term>>,
+    ) where
+        H: Fn(&Self, &[Term]) -> Option<(Var, Term)>,
+        I: Fn(&mut Self, &[Term]) -> bool,
+    {
+        let mut cur_interpr = interpretation.to_vec();
+        let mut ng_store = NoGoodStore::new(
+            self.ac
+                .len()
+                .try_into()
+                .expect("Expecting only u32 many statements"),
+        );
+        let mut stack: Vec<(bool, NoGood)> = Vec::new();
+        let mut interpr_history: Vec<Vec<Term>> = Vec::new();
+        let mut backtrack = false;
+        let mut update_ng;
+        let mut update_fp = false;
+        let mut choice = false;
+
+        log::debug!("start learning loop");
+        loop {
+            log::trace!("interpr: {:?}", cur_interpr);
+            log::trace!("choice: {}", choice);
+            if choice {
+                choice = false;
+                if let Some((var, term)) = heuristic(&*self, &cur_interpr) {
+                    log::trace!("choose {}->{}", var, term.is_true());
+                    interpr_history.push(cur_interpr.to_vec());
+                    cur_interpr[var.value()] = term;
+                    stack.push((true, cur_interpr.as_slice().into()));
+                } else {
+                    backtrack = true;
+                }
+            }
+            update_ng = true;
+            log::trace!("backtrack: {}", backtrack);
+            if backtrack {
+                backtrack = false;
+                if stack.is_empty() {
+                    break;
+                }
+                while let Some((choice, ng)) = stack.pop() {
+                    log::trace!("adding ng: {:?}", ng);
+                    ng_store.add_ng(ng);
+
+                    if choice {
+                        cur_interpr = interpr_history.pop().expect("both stacks (interpr_history and `stack`) should always be synchronous");
+                        log::trace!(
+                            "choice found, reverting interpretation to {:?}",
+                            cur_interpr
+                        );
+                        break;
+                    }
+                }
+            }
+            match ng_store.conclusion_closure(&cur_interpr) {
+                crate::nogoods::ClosureResult::Update(new_int) => {
+                    cur_interpr = new_int;
+                    log::trace!("ng update: {:?}", cur_interpr);
+                    stack.push((false, cur_interpr.as_slice().into()));
+                }
+                crate::nogoods::ClosureResult::NoUpdate => {
+                    log::trace!("no update");
+                    update_ng = false;
+                }
+                crate::nogoods::ClosureResult::Inconsistent => {
+                    log::trace!("inconsistency");
+                    backtrack = true;
+                    continue;
+                }
+            }
+
+            let ac_consistent_interpr = self.apply_interpretation(&self.ac.clone(), &cur_interpr);
+            log::trace!(
+                "checking consistency of {:?} against {:?}",
+                ac_consistent_interpr,
+                cur_interpr
+            );
+            if cur_interpr
+                .iter()
+                .zip(ac_consistent_interpr.iter())
+                .any(|(cur, ac)| {
+                    cur.is_truth_value() && ac.is_truth_value() && cur.is_true() != ac.is_true()
+                })
+            {
+                log::trace!("ac_inconsistency");
+                backtrack = true;
+                continue;
+            }
+
+            cur_interpr = self.update_interpretation_fixpoint_upd(&cur_interpr, &mut update_fp);
+            if update_fp {
+                log::trace!("fixpoint updated");
+                //stack.push((false, cur_interpr.as_slice().into()));
+            } else if !update_ng {
+                // No updates done this loop
+                if !self.is_two_valued(&cur_interpr) {
+                    choice = true;
+                } else if stability_check(self, &cur_interpr) {
+                    // stable model found
+                    stack.push((false, cur_interpr.as_slice().into()));
+                    s.send(cur_interpr.clone())
+                        .expect("Sender should accept results");
+                    backtrack = true;
+                } else {
+                    // not stable
+                    log::trace!("2 val not stable");
+                    stack.push((false, cur_interpr.as_slice().into()));
+                    backtrack = true;
+                }
+            }
+        }
+        log::info!("{ng_store}");
+        log::debug!("{:?}", ng_store);
+    }
 }

 #[cfg(test)]
 mod test {
     use super::*;
+    use crossbeam_channel::unbounded;
     use test_log::test;

     #[test]
@@ -574,11 +949,16 @@ mod test {
         parser.parse()(input).unwrap();

         let adf = Adf::from_parser(&parser);
-        assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[1], "c");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[2], "b");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[3], "e");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[4], "d");
+        assert_eq!(adf.ordering.name(Var(0)), Some("a".to_string()));
+        assert_eq!(adf.ordering.names().read().unwrap()[0], "a");
+        assert_eq!(adf.ordering.name(Var(1)), Some("c".to_string()));
+        assert_eq!(adf.ordering.names().read().unwrap()[1], "c");
+        assert_eq!(adf.ordering.name(Var(2)), Some("b".to_string()));
+        assert_eq!(adf.ordering.names().read().unwrap()[2], "b");
+        assert_eq!(adf.ordering.name(Var(3)), Some("e".to_string()));
+        assert_eq!(adf.ordering.names().read().unwrap()[3], "e");
+        assert_eq!(adf.ordering.name(Var(4)), Some("d".to_string()));
+        assert_eq!(adf.ordering.names().read().unwrap()[4], "d");

         assert_eq!(adf.ac, vec![Term(4), Term(2), Term(7), Term(15), Term(12)]);

@@ -589,11 +969,11 @@ mod test {
         parser.varsort_alphanum();

         let adf = Adf::from_parser(&parser);
-        assert_eq!(adf.ordering.names().as_ref().borrow()[0], "a");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[1], "b");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[2], "c");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[3], "d");
-        assert_eq!(adf.ordering.names().as_ref().borrow()[4], "e");
+        assert_eq!(adf.ordering.names().read().unwrap()[0], "a");
+        assert_eq!(adf.ordering.names().read().unwrap()[1], "b");
+        assert_eq!(adf.ordering.names().read().unwrap()[2], "c");
+        assert_eq!(adf.ordering.names().read().unwrap()[3], "d");
+        assert_eq!(adf.ordering.names().read().unwrap()[4], "e");

         assert_eq!(adf.ac, vec![Term(3), Term(7), Term(2), Term(11), Term(13)]);
     }
@@ -701,7 +1081,7 @@ mod test {
             .unwrap();
         let mut adf = Adf::from_parser(&parser);

-        let mut stable = adf.stable_count_optimisation();
+        let mut stable = adf.stable_count_optimisation_heu_a();
        assert_eq!(
            stable.next(),
            Some(vec![
@@ -717,7 +1097,7 @@ mod test {
         let parser = AdfParser::default();
         parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap();
         let mut adf = Adf::from_parser(&parser);
-        let mut stable = adf.stable_count_optimisation();
+        let mut stable = adf.stable_count_optimisation_heu_a();

         assert_eq!(stable.next(), Some(vec![Term::BOT, Term::TOP]));
         assert_eq!(stable.next(), Some(vec![Term::TOP, Term::BOT]));
@@ -728,14 +1108,173 @@ mod test {
         let mut adf = Adf::from_parser(&parser);

         assert_eq!(
-            adf.stable_count_optimisation().collect::<Vec<_>>(),
+            adf.stable_count_optimisation_heu_a().collect::<Vec<_>>(),
+            vec![vec![Term::BOT, Term::BOT]]
+        );
+
+        assert_eq!(
+            adf.stable_count_optimisation_heu_b().collect::<Vec<_>>(),
             vec![vec![Term::BOT, Term::BOT]]
         );

         let parser = AdfParser::default();
         parser.parse()("s(a).s(b).ac(a,neg(a)).ac(b,a).").unwrap();
         let mut adf = Adf::from_parser(&parser);
-        assert_eq!(adf.stable_count_optimisation().next(), None);
+        assert_eq!(adf.stable_count_optimisation_heu_a().next(), None);
+        assert_eq!(adf.stable_count_optimisation_heu_b().next(), None);
+    }
+
+    #[test]
+    fn stable_nogood() {
+        let parser = AdfParser::default();
+        parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
+            .unwrap();
+        let mut adf = Adf::from_parser(&parser);
+
+        let grounded = adf.grounded();
+        let (s, r) = unbounded();
+        adf.nogood_internal(&grounded, heuristics::heu_simple, Adf::stability_check, s);
+
+        assert_eq!(
+            r.iter().collect::<Vec<_>>(),
+            vec![vec![
+                Term::TOP,
+                Term::BOT,
+                Term::BOT,
+                Term::TOP,
+                Term::BOT,
+                Term::TOP
+            ]]
+        );
+        let mut stable_iter = adf.stable_nogood(Heuristic::Simple);
+        assert_eq!(
+            stable_iter.next(),
+            Some(vec![
+                Term::TOP,
+                Term::BOT,
+                Term::BOT,
+                Term::TOP,
+                Term::BOT,
+                Term::TOP
+            ])
+        );
+
+        assert_eq!(stable_iter.next(), None);
+        let parser = AdfParser::default();
+        parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap();
+        let mut adf = Adf::from_parser(&parser);
+        let grounded = adf.grounded();
+        let (s, r) = unbounded();
+        adf.nogood_internal(
+            &grounded,
+            heuristics::heu_simple,
+            Adf::stability_check,
+            s.clone(),
+        );
+        let stable_result = r.try_iter().collect::<Vec<_>>();
+        assert_eq!(
+            stable_result,
+            vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]
+        );
+
+        let stable = adf.stable_nogood(Heuristic::Simple);
+        assert_eq!(
+            stable.collect::<Vec<_>>(),
+            vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]
+        );
+
+        let stable = adf.stable_nogood(Heuristic::Custom(&|_adf, interpr| {
+            for (idx, term) in interpr.iter().enumerate() {
+                if !term.is_truth_value() {
+                    return Some((Var(idx), Term::BOT));
+                }
+            }
+            None
+        }));
+        assert_eq!(
+            stable.collect::<Vec<_>>(),
+            vec![vec![Term(0), Term(1)], vec![Term(1), Term(0)]]
+        );
+
+        adf.stable_nogood_channel(Heuristic::default(), s);
+        assert_eq!(
+            r.iter().collect::<Vec<_>>(),
+            vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]
+        );
+
+        // multi-threaded usage
+        let (s, r) = unbounded();
+        let solving = std::thread::spawn(move || {
+            let parser = AdfParser::default();
+            parser.parse()("s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,b).ac(c,and(a,b)).ac(d,neg(b)).\ns(e).ac(e,and(b,or(neg(b),c(f)))).s(f).\n\nac(f,xor(a,e)).")
+                .unwrap();
+            let mut adf = Adf::from_parser(&parser);
+            adf.stable_nogood_channel(Heuristic::MinModMaxVarImpMinPaths, s.clone());
+            adf.stable_nogood_channel(Heuristic::MinModMinPathsMaxVarImp, s.clone());
+            adf.two_val_nogood_channel(Heuristic::Simple, s)
+        });
+
+        let mut result_vec = Vec::new();
+        while let Ok(result) = r.recv() {
+            result_vec.push(result);
+        }
+        assert_eq!(
+            result_vec,
+            vec![
+                vec![
+                    Term::TOP,
+                    Term::BOT,
+                    Term::BOT,
+                    Term::TOP,
+                    Term::BOT,
+                    Term::TOP
+                ],
+                vec![
+                    Term::TOP,
+                    Term::BOT,
+                    Term::BOT,
+                    Term::TOP,
+                    Term::BOT,
+                    Term::TOP
+                ],
+                vec![
+                    Term::TOP,
+                    Term::TOP,
+                    Term::TOP,
+                    Term::BOT,
+                    Term::BOT,
+                    Term::TOP
+                ],
+                vec![
+                    Term::TOP,
+                    Term::BOT,
+                    Term::BOT,
+                    Term::TOP,
+                    Term::BOT,
+                    Term::TOP
+                ],
+            ]
+        );
+        solving.join().unwrap();
+    }
+
+    #[test]
+    fn rand_stable_heu() {
+        let parser = AdfParser::default();
+        parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").unwrap();
+        let mut adf = Adf::from_parser(&parser);
+        let result = adf.stable_nogood(Heuristic::Rand).collect::<Vec<_>>();
+        assert!(result.contains(&vec![Term(0), Term(1)]));
+        assert!(result.contains(&vec![Term(1), Term(0)]));
+        assert_eq!(result.len(), 2);
+
+        let mut adf = Adf::from_parser(&parser);
+        adf.seed([
+            122, 186, 240, 42, 235, 102, 89, 81, 187, 203, 127, 188, 167, 198, 126, 156, 25, 205,
+            204, 132, 112, 93, 23, 193, 21, 108, 166, 231, 158, 250, 128, 135,
+        ]);
+        let result = adf.stable_nogood(Heuristic::Rand).collect::<Vec<_>>();
+        assert_eq!(result, vec![vec![Term(1), Term(0)], vec![Term(0), Term(1)]]);
     }
@@ -780,6 +1319,7 @@ mod test {
         }
     }

+    #[cfg(feature = "adhoccountmodels")]
     #[test]
     fn formulacounts() {
         let parser = AdfParser::default();
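Besides `stable_nogood_channel` (shown in the README above), this changeset adds `two_val_nogood_channel`, which swaps the stability check for a constant `true` and therefore reports every two-valued model. A hedged sketch of calling it synchronously, with an illustrative two-statement input:

```rust
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::adf::Adf;
use adf_bdd::parser::AdfParser;

let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").expect("parsing should work");
let mut adf = Adf::from_parser(&parser);

let (sender, receiver) = crossbeam_channel::unbounded();
// Runs on the current thread; the sender is dropped when the call returns,
// so the receiver's iterator terminates by itself.
adf.two_val_nogood_channel(Heuristic::Simple, sender);
for model in receiver.try_iter() {
    println!("two-valued model: {:?}", model);
}
```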
162 lib/src/adf/heuristics.rs Normal file

@@ -0,0 +1,162 @@
/*!
This module contains all the crate-wide defined heuristic functions.
In addition there is the public enum [Heuristic], which allows to set a heuristic function with the public API.
*/
use super::Adf;
use crate::datatypes::{Term, Var};

use rand::{Rng, RngCore};
use strum::{EnumString, EnumVariantNames};

/// Return value for heuristics.
pub type RetVal = Option<(Var, Term)>;
/// Signature for heuristics functions.
pub type HeuristicFn = dyn Fn(&Adf, &[Term]) -> RetVal + Sync;

pub(crate) fn heu_simple(_adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
    for (idx, term) in interpr.iter().enumerate() {
        if !term.is_truth_value() {
            return Some((Var(idx), Term::TOP));
        }
    }
    None
}

pub(crate) fn heu_mc_minpaths_maxvarimp(adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
    interpr
        .iter()
        .enumerate()
        .filter(|(_var, term)| !term.is_truth_value())
        .min_by(|(vara, &terma), (varb, &termb)| {
            match adf
                .bdd
                .paths(terma, true)
                .minimum()
                .cmp(&adf.bdd.paths(termb, true).minimum())
            {
                std::cmp::Ordering::Equal => adf
                    .bdd
                    .passive_var_impact(Var::from(*vara), interpr)
                    .cmp(&adf.bdd.passive_var_impact(Var::from(*varb), interpr)),
                value => value,
            }
        })
        .map(|(var, term)| {
            (
                Var::from(var),
                adf.bdd.paths(*term, true).more_models().into(),
            )
        })
}

pub(crate) fn heu_mc_maxvarimp_minpaths(adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
    interpr
        .iter()
        .enumerate()
        .filter(|(_var, term)| !term.is_truth_value())
        .min_by(|(vara, &terma), (varb, &termb)| {
            match adf
                .bdd
                .passive_var_impact(Var::from(*vara), interpr)
                .cmp(&adf.bdd.passive_var_impact(Var::from(*varb), interpr))
            {
                std::cmp::Ordering::Equal => adf
                    .bdd
                    .paths(terma, true)
                    .minimum()
                    .cmp(&adf.bdd.paths(termb, true).minimum()),
                value => value,
            }
        })
        .map(|(var, term)| {
            (
                Var::from(var),
                adf.bdd.paths(*term, true).more_models().into(),
            )
        })
}

pub(crate) fn heu_rand(adf: &Adf, interpr: &[Term]) -> Option<(Var, Term)> {
    let possible = interpr
        .iter()
        .enumerate()
        .filter(|(_var, term)| !term.is_truth_value())
        .collect::<Vec<_>>();
    if possible.is_empty() {
        return None;
    }
    let mut rng = adf.rng.borrow_mut();
    if let Ok(position) = usize::try_from(rng.next_u64() % (possible.len() as u64)) {
        Some((Var::from(position), rng.gen_bool(0.5).into()))
    } else {
        None
    }
}

/// Enumeration of all currently implemented heuristics.
/// It represents a public view on the crate-internal implementations of heuristics.
#[derive(EnumString, EnumVariantNames, Copy, Clone)]
pub enum Heuristic<'a> {
    /// Implementation of a simple heuristic.
    /// This will just take the first undecided variable and map its value to [`true`][Term::TOP].
    Simple,
    /// Implementation of a heuristic which uses the minimal number of [paths][crate::obdd::Bdd::paths] and the maximal [variable-impact][crate::obdd::Bdd::passive_var_impact] to identify the variable to be set.
    /// As the value, the variable value with the maximal model-path is chosen.
    MinModMinPathsMaxVarImp,
    /// Implementation of a heuristic which uses the maximal [variable-impact][crate::obdd::Bdd::passive_var_impact] and the minimal number of [paths][crate::obdd::Bdd::paths] to identify the variable to be set.
    /// As the value, the variable value with the maximal model-path is chosen.
    MinModMaxVarImpMinPaths,
    /// Implementation of a heuristic which chooses random values.
    Rand,
    /// Allow passing in an externally-defined custom heuristic.
    #[strum(disabled)]
    Custom(&'a HeuristicFn),
}

impl Default for Heuristic<'_> {
    fn default() -> Self {
        Self::Simple
    }
}

impl std::fmt::Debug for Heuristic<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Simple => write!(f, "Simple"),
            Self::MinModMinPathsMaxVarImp => write!(f, "Maximal model-path count as value and minimum paths with maximal variable impact as variable choice"),
            Self::MinModMaxVarImpMinPaths => write!(f, "Maximal model-path count as value and maximal variable impact with minimum paths as variable choice"),
            Self::Rand => write!(f, "Random heuristics"),
            Self::Custom(_) => f.debug_tuple("Custom function").finish(),
        }
    }
}

impl Heuristic<'_> {
    pub(crate) fn get_heuristic(&self) -> &(dyn Fn(&Adf, &[Term]) -> RetVal + '_) {
        match self {
            Heuristic::Simple => &heu_simple,
            Heuristic::MinModMinPathsMaxVarImp => &heu_mc_minpaths_maxvarimp,
            Heuristic::MinModMaxVarImpMinPaths => &heu_mc_maxvarimp_minpaths,
            Heuristic::Rand => &heu_rand,
            Self::Custom(f) => f,
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::datatypes::Term;
    use crate::datatypes::Var;

    #[test]
    fn debug_out() {
        dbg!(Heuristic::Simple);
        dbg!(Heuristic::MinModMaxVarImpMinPaths);
        dbg!(Heuristic::MinModMinPathsMaxVarImp);
        dbg!(Heuristic::Rand);
        dbg!(Heuristic::Custom(&|_adf: &Adf,
                                 _int: &[Term]|
         -> Option<(Var, Term)> { None }));
    }
}
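The `#[strum(disabled)]` `Custom` variant is the extension point of this enum. A short sketch of plugging in a hand-rolled heuristic; the decision rule itself is made up for illustration:

```rust
use adf_bdd::adf::heuristics::Heuristic;
use adf_bdd::adf::Adf;
use adf_bdd::datatypes::{Term, Var};
use adf_bdd::parser::AdfParser;

// Decide the *last* undecided statement and guess `false` first.
let last_undecided = |_adf: &Adf, interpr: &[Term]| -> Option<(Var, Term)> {
    interpr
        .iter()
        .enumerate()
        .rev()
        .find(|(_, term)| !term.is_truth_value())
        .map(|(idx, _)| (Var(idx), Term::BOT))
};

let parser = AdfParser::default();
parser.parse()("s(a).s(b).ac(a,neg(b)).ac(b,neg(a)).").expect("parsing should work");
let mut adf = Adf::from_parser(&parser);
for model in adf.stable_nogood(Heuristic::Custom(&last_undecided)) {
    println!("{:?}", model);
}
```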
lib/src/adfbiodivine.rs

@@ -1,8 +1,11 @@
 //! This module describes the abstract dialectical framework
-//! utilising the biodivine-lib-bdd (see <https://github.com/sybila/biodivine-lib-bdd>) BDD implementation to compute the
+//! utilising the biodivine-lib-bdd (see <https://github.com/sybila/biodivine-lib-bdd>) BDD implementation to compute various semantics.
+//!
+//! These are currently the
 //! - grounded
 //! - stable
 //! - complete
+//!
 //! semantics of ADFs.

 use crate::{
@@ -21,7 +24,7 @@ use derivative::Derivative;

 #[derive(Derivative)]
 #[derivative(Debug)]
-/// Representation of an ADF, with an ordering and dictionary of statement <-> number relations, a binary decision diagram, and a list of acceptance functions in biodivine representation together with a variable-list (needed by biodivine)
+/// Representation of an ADF, with an ordering and dictionary which relates statements to numbers, a binary decision diagram, and a list of acceptance functions in biodivine representation together with a variable-list (needed by biodivine).
 ///
 /// To be compatible with results from the own implementation of the Bdd-based [`Adf`][crate::adf::Adf], we use the [`Term`][crate::datatypes::Term]-based representation for the various computed models.
 pub struct Adf {
@@ -34,23 +37,21 @@ pub struct Adf {
 }

 impl Adf {
-    /// Instantiates a new ADF, based on the parser-data
+    /// Instantiates a new ADF, based on the parser-data.
     pub fn from_parser(parser: &AdfParser) -> Self {
         log::info!("[Start] instantiating BDD");
         let mut bdd_var_builder = biodivine_lib_bdd::BddVariableSetBuilder::new();
-        let namelist = parser.namelist_rc_refcell().as_ref().borrow().clone();
+        let namelist = parser
+            .namelist()
+            .read()
+            .expect("ReadLock on namelist failed")
+            .clone();
         let slice_vec: Vec<&str> = namelist.iter().map(<_>::as_ref).collect();
         bdd_var_builder.make_variables(&slice_vec);
         let bdd_variables = bdd_var_builder.build();
         let mut result = Self {
-            ordering: VarContainer::from_parser(
-                parser.namelist_rc_refcell(),
-                parser.dict_rc_refcell(),
-            ),
-            ac: vec![
-                bdd_variables.mk_false();
-                parser.namelist_rc_refcell().as_ref().borrow().len()
-            ],
+            ordering: parser.var_container(),
+            ac: vec![bdd_variables.mk_false(); parser.dict_size()],
             vars: bdd_variables.variables(),
             varset: bdd_variables,
             rewrite: None,
@@ -78,7 +79,7 @@ impl Adf {
         result
     }

-    /// Instantiates a new ADF and prepares a rewriting for the stable model computation based on the parser-data
+    /// Instantiates a new ADF and prepares a rewriting for the stable model computation based on the parser-data.
     pub fn from_parser_with_stm_rewrite(parser: &AdfParser) -> Self {
         let mut result = Self::from_parser(parser);
         log::debug!("[Start] rewriting");
@@ -89,7 +90,7 @@ impl Adf {

     pub(crate) fn stm_rewriting(&mut self, parser: &AdfParser) {
         let expr = parser.formula_order().iter().enumerate().fold(
-            biodivine_lib_bdd::boolean_expression::BooleanExpression::Const(true),
+            BooleanExpression::Const(true),
             |acc, (insert_order, new_order)| {
                 BooleanExpression::And(
                     Box::new(acc),
@@ -108,7 +109,7 @@ impl Adf {
         self.rewrite = Some(self.varset.eval_expression(&expr));
     }

-    /// returns `true` if the stable rewriting for this adf exists
+    /// Returns `true` if the stable rewriting for this ADF exists.
     pub fn has_stm_rewriting(&self) -> bool {
         self.rewrite.is_some()
     }
@@ -120,7 +121,7 @@ impl Adf {
     pub(crate) fn ac(&self) -> &[Bdd] {
         &self.ac
     }
-    /// Computes the grounded extension and returns it as a list
+    /// Computes the grounded extension and returns it as a list.
     pub fn grounded(&self) -> Vec<Term> {
         log::info!("[Start] grounded");
         let ac = &self.ac.clone();
@@ -186,8 +187,8 @@ impl Adf {
         new_interpretation
     }

-    /// Computes the complete models
-    /// Returns an Iterator which contains all the complete models
+    /// Computes the complete models.
+    /// Returns an [Iterator][std::iter::Iterator] which contains all the complete models.
     pub fn complete<'a, 'b>(&'a self) -> impl Iterator<Item = Vec<Term>> + 'b
     where
         'a: 'b,
@@ -205,7 +206,7 @@ impl Adf {

     /// Shifts the representation and allows to use the naive approach.
     ///
-    /// The grounded interpretation is computed by the biodivine library first.
+    /// The grounded interpretation is computed by the [biodivine library](https://github.com/sybila/biodivine-lib-bdd) first.
     pub fn hybrid_step(&self) -> crate::adf::Adf {
         crate::adf::Adf::from_biodivine_vector(
             self.var_container(),
@@ -215,7 +216,7 @@ impl Adf {

     /// Shifts the representation and allows to use the naive approach.
     ///
-    /// `bio_grounded` will compute the grounded, based on biodivine, first.
+    /// `bio_grounded` will compute the grounded, based on [biodivine](https://github.com/sybila/biodivine-lib-bdd), first.
     pub fn hybrid_step_opt(&self, bio_grounded: bool) -> crate::adf::Adf {
         if bio_grounded {
             self.hybrid_step()
@@ -224,8 +225,8 @@ impl Adf {
         }
     }

-    /// Computes the stable models
-    /// Returns an Iterator which contains all the stable models
+    /// Computes the stable models.
+    /// Returns an [Iterator][std::iter::Iterator] which contains all the stable models.
     pub fn stable<'a, 'b>(&'a self) -> impl Iterator<Item = Vec<Term>> + 'b
     where
         'a: 'b,
@@ -258,8 +259,8 @@ impl Adf {
         )
     }

-    /// Computes the stable models
-    /// This variant returns all stable models and utilises a rewrite of the adf as one big conjunction of equalities (iff)
+    /// Computes the stable models.
+    /// This variant returns all stable models and utilises a rewrite of the ADF as one big conjunction of equalities (`if and only if`).
     pub fn stable_bdd_representation(&self) -> Vec<Vec<Term>> {
         let smc = self.stable_model_candidates();
         log::debug!("[Start] checking for stability");
@@ -317,29 +318,26 @@ impl Adf {
             .collect::<Vec<Vec<Term>>>()
     }

+    /// Computes the stable representation.
     fn stable_representation(&self) -> Bdd {
         log::debug!("[Start] stable representation rewriting");
         self.ac.iter().enumerate().fold(
-            self.varset.eval_expression(
-                &biodivine_lib_bdd::boolean_expression::BooleanExpression::Const(true),
-            ),
+            self.varset.eval_expression(&BooleanExpression::Const(true)),
             |acc, (idx, formula)| {
                 acc.and(
                     &formula.iff(
-                        &self.varset.eval_expression(
-                            &biodivine_lib_bdd::boolean_expression::BooleanExpression::Variable(
-                                self.ordering
-                                    .name(crate::datatypes::Var(idx))
-                                    .expect("Variable should exist"),
-                            ),
-                        ),
+                        &self.varset.eval_expression(&BooleanExpression::Variable(
+                            self.ordering
+                                .name(crate::datatypes::Var(idx))
+                                .expect("Variable should exist"),
+                        )),
                     ),
                 )
             },
         )
     }

-    /// creates a [PrintableInterpretation] for output purposes
+    /// Creates a [PrintableInterpretation] for output purposes.
     pub fn print_interpretation<'a, 'b>(
         &'a self,
         interpretation: &'b [Term],
@@ -350,18 +348,18 @@ impl Adf {
         PrintableInterpretation::new(interpretation, &self.ordering)
     }

-    /// creates a [PrintDictionary] for output purposes
+    /// Creates a [PrintDictionary] for output purposes.
     pub fn print_dictionary(&self) -> PrintDictionary {
         PrintDictionary::new(&self.ordering)
     }
 }

-/// Provides Adf-Specific operations on truth valuations
+/// Provides ADF-specific operations on truth valuations.
 pub trait AdfOperations {
-    /// Returns `true` if the BDD is either valid or unsatisfiable
+    /// Returns `true` if the roBDD is either valid or unsatisfiable.
     fn is_truth_value(&self) -> bool;

-    /// Compares whether the information between two given BDDs are the same
+    /// Compares whether the information between two given roBDDs is the same.
     fn cmp_information(&self, other: &Self) -> bool;
 }
@@ -375,17 +373,17 @@ impl AdfOperations for Bdd {
     }
 }

-/// Implementations of the restrict-operations on BDDs
+/// Implementations of the restrict-operations on roBDDs.
 pub trait BddRestrict {
-    /// Provides an implementation of the restrict-operation on BDDs for one variable
+    /// Provides an implementation of the restrict-operation on roBDDs for one variable.
     fn var_restrict(&self, variable: biodivine_lib_bdd::BddVariable, value: bool) -> Self;
-    /// Provides an implementation of the restrict-operation on a set of variables
+    /// Provides an implementation of the restrict-operation on a set of variables.
     fn restrict(&self, variables: &[(biodivine_lib_bdd::BddVariable, bool)]) -> Self;
 }

 impl BddRestrict for Bdd {
     fn var_restrict(&self, variable: biodivine_lib_bdd::BddVariable, value: bool) -> Bdd {
-        self.var_select(variable, value).var_project(variable)
+        self.var_select(variable, value).var_exists(variable)
     }

     fn restrict(&self, variables: &[(biodivine_lib_bdd::BddVariable, bool)]) -> Bdd {
@@ -393,7 +391,7 @@ impl BddRestrict for Bdd {
         variables
             .iter()
             .for_each(|(var, _val)| variablelist.push(*var));
-        self.select(variables).project(&variablelist)
+        self.select(variables).exists(&variablelist)
     }
 }

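The `exists`-based restrict above is the textbook decomposition: `select` first fixes the chosen variables to their values, then existential quantification removes them from the support. A small sketch against `biodivine-lib-bdd`, using only the operations that appear in the implementation; the formula is an invented example.

```rust
use biodivine_lib_bdd::BddVariableSetBuilder;

fn main() {
    let mut builder = BddVariableSetBuilder::new();
    let a = builder.make_variable("a");
    builder.make_variable("b");
    let varset = builder.build();

    let formula = varset.eval_expression_string("a & b");

    // Restrict a := true, i.e. fix the branch, then quantify `a` away.
    let restricted = formula.var_select(a, true).var_exists(a);

    // What remains no longer mentions `a`: it is just `b`.
    assert_eq!(restricted, varset.eval_expression_string("b"));
}
```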
@@ -429,8 +427,8 @@ mod test {
         let c = variables.eval_expression_string("c");
         let d = variables.eval_expression_string("a & b & c");
         let e = variables.eval_expression_string("a ^ b");
-        let t = variables.eval_expression(&boolean_expression::BooleanExpression::Const(true));
-        let f = variables.eval_expression(&boolean_expression::BooleanExpression::Const(false));
+        let t = variables.eval_expression(&BooleanExpression::Const(true));
+        let f = variables.eval_expression(&BooleanExpression::Const(false));

         println!("{:?}", a.to_string());
         println!("{:?}", a.to_bytes());
@@ -1,4 +1,4 @@
-//! A collection of all the necessary datatypes of the system.
+//! Collection of all the necessary datatypes of the system.
 pub mod adf;
 mod bdd;
 pub use bdd::*;
@@ -1,53 +1,73 @@
-//! Repesentation of all needed ADF based datatypes
+//! Representation of all needed ADF based datatypes.

 use super::{Term, Var};
 use serde::{Deserialize, Serialize};
-use std::{cell::RefCell, collections::HashMap, fmt::Display, rc::Rc};
+use std::{collections::HashMap, fmt::Display, sync::Arc, sync::RwLock};

-#[derive(Serialize, Deserialize, Debug)]
-pub(crate) struct VarContainer {
-    names: Rc<RefCell<Vec<String>>>,
-    mapping: Rc<RefCell<HashMap<String, usize>>>,
+/// A container which acts as a dictionary as well as an ordering of variables.
+/// *names* is a list of variable-names and the sequence of the values is inducing the order of variables.
+/// *mapping* allows to search for a variable name and to receive the corresponding position in the variable list (`names`).
+///
+/// # Important note
+/// If one [VarContainer] is used to instantiate an [Adf][crate::adf::Adf] (resp. [Biodivine Adf][crate::adfbiodivine::Adf]) a revision (other than adding more information) might result in wrong variable-name mapping when trying to print the output using the [PrintDictionary].
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct VarContainer {
+    names: Arc<RwLock<Vec<String>>>,
+    mapping: Arc<RwLock<HashMap<String, usize>>>,
 }

 impl Default for VarContainer {
     fn default() -> Self {
         VarContainer {
-            names: Rc::new(RefCell::new(Vec::new())),
-            mapping: Rc::new(RefCell::new(HashMap::new())),
+            names: Arc::new(RwLock::new(Vec::new())),
+            mapping: Arc::new(RwLock::new(HashMap::new())),
         }
     }
 }

 impl VarContainer {
+    /// Create [`VarContainer`] from its components
     pub fn from_parser(
-        names: Rc<RefCell<Vec<String>>>,
-        mapping: Rc<RefCell<HashMap<String, usize>>>,
+        names: Arc<RwLock<Vec<String>>>,
+        mapping: Arc<RwLock<HashMap<String, usize>>>,
     ) -> VarContainer {
         VarContainer { names, mapping }
     }

-    pub fn copy(from: &Self) -> Self {
-        VarContainer {
-            names: from.names.clone(),
-            mapping: from.mapping.clone(),
-        }
-    }
-
+    /// Get the [Var] used by the `Bdd` which corresponds to the given [&str].
+    /// Returns [None] if no matching value is found.
     pub fn variable(&self, name: &str) -> Option<Var> {
-        self.mapping.borrow().get(name).map(|val| Var(*val))
+        self.mapping
+            .read()
+            .ok()
+            .and_then(|map| map.get(name).map(|val| Var(*val)))
     }

+    /// Get the name which corresponds to the given [Var].
+    /// Returns [None] if no matching value is found.
     pub fn name(&self, var: Var) -> Option<String> {
-        self.names.borrow().get(var.value()).cloned()
+        self.names
+            .read()
+            .ok()
+            .and_then(|name| name.get(var.value()).cloned())
     }

-    #[allow(dead_code)]
-    pub fn names(&self) -> Rc<RefCell<Vec<String>>> {
-        Rc::clone(&self.names)
+    /// Return ordered names from [`VarContainer`]
+    pub fn names(&self) -> Arc<RwLock<Vec<String>>> {
+        Arc::clone(&self.names)
+    }
+
+    /// Return map from names to indices in [`VarContainer`]
+    pub fn mappings(&self) -> Arc<RwLock<HashMap<String, usize>>> {
+        Arc::clone(&self.mapping)
+    }
+
+    /// Creates a [PrintDictionary] for output purposes.
+    pub fn print_dictionary(&self) -> PrintDictionary {
+        PrintDictionary::new(self)
     }
 }

-/// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations
+/// A struct which holds the dictionary to print interpretations and allows to instantiate printable interpretations.
 #[derive(Debug)]
 pub struct PrintDictionary {
     ordering: VarContainer,
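Swapping `Rc<RefCell<...>>` for `Arc<RwLock<...>>` is what makes `VarContainer` shareable across threads, which the channel-based example later in this diff relies on. A short sketch using only the constructors and accessors shown above; the concrete variable names `a`/`b` are invented.

```rust
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

use adf_bdd::datatypes::adf::VarContainer;
use adf_bdd::datatypes::Var;

fn main() {
    let names = Arc::new(RwLock::new(vec!["a".to_string(), "b".to_string()]));
    let mut map = HashMap::new();
    map.insert("a".to_string(), 0);
    map.insert("b".to_string(), 1);
    let container = VarContainer::from_parser(names, Arc::new(RwLock::new(map)));

    // Arc clones share the same underlying data, and the container is now
    // Send + Sync, so it can be moved into another thread.
    let worker = container.clone();
    let handle = std::thread::spawn(move || worker.variable("b"));

    assert_eq!(handle.join().unwrap(), Some(Var(1)));
    assert_eq!(container.name(Var(0)), Some("a".to_string()));
}
```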
@@ -56,7 +76,7 @@ pub struct PrintDictionary {
 impl PrintDictionary {
     pub(crate) fn new(order: &VarContainer) -> Self {
         Self {
-            ordering: VarContainer::copy(order),
+            ordering: order.clone(),
         }
     }
     /// creates a [PrintableInterpretation] for output purposes
@@ -115,8 +135,8 @@ impl Display for PrintableInterpretation<'_> {
     }
 }

-/// Provides an Iterator, which contains all two valued Interpretations, with respect to the given
-/// 3-valued interpretation.
+/// Provides an [Iterator][std::iter::Iterator], which contains all two valued interpretations, with respect to the given
+/// three valued interpretation.
 #[derive(Debug)]
 pub struct TwoValuedInterpretationsIterator {
@@ -126,12 +146,12 @@ pub struct TwoValuedInterpretationsIterator {
     }
 }

 impl TwoValuedInterpretationsIterator {
-    /// Creates a new iterable structure, which represents all two-valued interpretations wrt. the given 3-valued interpretation
+    /// Creates a new iterable structure, which represents all two-valued interpretations wrt. the given three valued interpretation.
     pub fn new(term: &[Term]) -> Self {
         let indexes = term
             .iter()
             .enumerate()
-            .filter_map(|(idx, &v)| (!v.is_truth_value()).then(|| idx))
+            .filter_map(|(idx, &v)| (!v.is_truth_value()).then_some(idx))
             .rev()
             .collect::<Vec<_>>();
         let current = term
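The change from `then(|| idx)` to `then_some(idx)` follows clippy's `unnecessary_lazy_evaluations` advice: when the value already exists, the eager variant is clearer. For reference, the two std methods behave as follows:

```rust
fn main() {
    let idx = 3usize;

    // Eager: the value is already computed, no closure needed.
    assert_eq!(true.then_some(idx), Some(3));
    assert_eq!(false.then_some(idx), None);

    // Lazy: only worthwhile when producing the value is expensive.
    assert_eq!(true.then(|| idx * 2), Some(6));
}
```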
@@ -195,7 +215,7 @@ impl ThreeValuedInterpretationsIterator {
         let indexes = term
             .iter()
             .enumerate()
-            .filter_map(|(idx, &v)| (!v.is_truth_value()).then(|| idx))
+            .filter_map(|(idx, &v)| (!v.is_truth_value()).then_some(idx))
             .rev()
             .collect::<Vec<_>>();
         let current = vec![2; indexes.len()];
@@ -216,7 +236,7 @@ impl ThreeValuedInterpretationsIterator {
     }
 }

-fn decrement_vec(vector: &mut Vec<usize>) -> bool {
+fn decrement_vec(vector: &mut [usize]) -> bool {
     let mut cur_pos = None;
     for (idx, value) in vector.iter_mut().enumerate() {
         if *value > 0 {
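The new `decrement_vec` signature follows clippy's `ptr_arg` advice: `&mut [usize]` accepts callers holding a `Vec`, an array, or any other slice, while promising not to grow or shrink the buffer. A tiny illustration of the difference:

```rust
// A function over a mutable slice works for both vectors and arrays.
fn zero_first(values: &mut [usize]) {
    if let Some(first) = values.first_mut() {
        *first = 0;
    }
}

fn main() {
    let mut v = vec![7, 8, 9];
    let mut a = [7, 8, 9];
    zero_first(&mut v); // &mut Vec<usize> coerces to &mut [usize]
    zero_first(&mut a); // plain arrays work too
    assert_eq!(v[0], 0);
    assert_eq!(a[0], 0);
}
```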
@@ -1,14 +1,14 @@
 //! To represent a BDD, a couple of datatypes is needed.
 //! This module consists of all internally and externally used datatypes, such as
-//! [Term], [Var], and [BddNode]
+//! [Term], [Var], and [BddNode].
 use serde::{Deserialize, Serialize};
 use std::{fmt::Display, ops::Deref};

 use crate::adfbiodivine::AdfOperations;

-/// Representation of a Term
-/// Each Term is represented in a number ([usize]) and relates to a
-/// Node in the decision diagram
+/// Representation of a Term.
+/// Each [`Term`] is represented in a number ([usize]) and relates to a
+/// node in the [BDD][crate::obdd::Bdd].
 #[derive(Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Copy, Clone, Serialize, Deserialize)]
 pub struct Term(pub usize);

@@ -25,6 +25,16 @@ impl From<usize> for Term {
     }
 }

+impl From<bool> for Term {
+    fn from(val: bool) -> Self {
+        if val {
+            Self::TOP
+        } else {
+            Self::BOT
+        }
+    }
+}
+
 impl From<&biodivine_lib_bdd::Bdd> for Term {
     fn from(val: &biodivine_lib_bdd::Bdd) -> Self {
         if val.is_true() {
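A quick usage sketch of the new `From<bool>` impl: it maps the two Boolean constants onto the corresponding terminal terms, mirroring the existing `From<&biodivine_lib_bdd::Bdd>` conversion right below it.

```rust
use adf_bdd::datatypes::Term;

fn main() {
    assert_eq!(Term::from(true), Term::TOP);
    assert_eq!(Term::from(false), Term::BOT);

    // The blanket std impl gives the `Into` direction for free.
    let t: Term = true.into();
    assert_eq!(t, Term::TOP);
}
```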
@@ -44,14 +54,16 @@ impl Display for Term {
     }
 }

 impl Term {
-    /// Represents the truth-value bottom, i.e. false
+    /// Represents the truth-value bottom, i.e., false.
     pub const BOT: Term = Term(0);
-    /// Represents the truth-value top, i.e. true
+    /// Represents the truth-value top, i.e., true.
     pub const TOP: Term = Term(1);
-    /// Represents the truth-value undecided, i.e. sat, but not valid
+    /// Represents the truth-value undecided, i.e., sat, but not valid.
+    ///
+    /// In other words, we are describing a truth-value, which still allows a consistent solution, but is not necessarily decided yet.
     pub const UND: Term = Term(2);

-    /// Get the value of the Term, i.e. the corresponding [usize]
+    /// Get the value of the [Term], i.e., the corresponding [usize].
     pub fn value(self) -> usize {
         self.0
     }
@@ -62,33 +74,33 @@ impl Term {
         self.0 <= Term::TOP.0
     }

-    /// Returns true, if the Term is true, i.e. [Term::TOP]
+    /// Returns [true], if the [Term] is true, i.e., [Term::TOP].
     pub fn is_true(&self) -> bool {
         *self == Self::TOP
     }

-    /// Returns true, if the Terms have the same information-value
+    /// Returns [true], if the [Term]s have the same information-value.
     pub fn compare_inf(&self, other: &Self) -> bool {
         self.is_truth_value() == other.is_truth_value() && self.is_true() == other.is_true()
     }

-    /// Returns true if the information of *other* does not decrease and it is not inconsistent.
-    pub fn no_inf_decrease(&self, other: &Self) -> bool {
+    /// Returns [true] if the information of **other** does not decrease and it is not inconsistent.
+    pub fn no_inf_inconsistency(&self, other: &Self) -> bool {
         if self.compare_inf(other) {
             return true;
         }
         !self.is_truth_value()
     }

-    /// Returns true, if the Term and the BDD have the same information-value
+    /// Returns [true], if the [Term] and the roBDD have the same information-value.
     pub fn cmp_information(&self, other: &biodivine_lib_bdd::Bdd) -> bool {
         self.is_truth_value() == other.is_truth_value() && self.is_true() == other.is_true()
     }
 }

-/// Representation of Variables
+/// Representation of variables.
 /// Note that the algorithm only uses [usize] values to identify variables.
-/// The order of these values will be defining for the Variable order of the decision diagram.
+/// The order of these values will be defining for the [variable][Var] order of the roBDD.
 #[derive(Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
 pub struct Var(pub usize);

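The rename from `no_inf_decrease` to `no_inf_inconsistency` matches what the method actually checks: refining an undecided value is fine, flipping a definite value is not. A short sketch of the resulting semantics, using only the constants and methods shown above:

```rust
use adf_bdd::datatypes::Term;

fn main() {
    // TOP and BOT carry definite information, UND does not.
    assert!(Term::TOP.is_truth_value());
    assert!(!Term::UND.is_truth_value());

    // Refining `undecided` to a definite value is consistent ...
    assert!(Term::UND.no_inf_inconsistency(&Term::TOP));
    assert!(Term::UND.no_inf_inconsistency(&Term::BOT));

    // ... while flipping an already definite value is not.
    assert!(!Term::TOP.no_inf_inconsistency(&Term::BOT));

    // Identical information is trivially consistent.
    assert!(Term::TOP.no_inf_inconsistency(&Term::TOP));
}
```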
@@ -112,28 +124,28 @@ impl Display for Var {
     }
 }

 impl Var {
-    /// Represents the constant symbol "Top"
+    /// Represents the constant symbol "⊤", which stands for the "verum" concept.
     pub const TOP: Var = Var(usize::MAX);
-    /// Represents the constant symbol "Bot"
+    /// Represents the constant symbol "⊥", which stands for the "falsum" concept.
     pub const BOT: Var = Var(usize::MAX - 1);

-    /// Returns the value of the [Var] as [usize]
+    /// Returns the value of the [variable][Var] as [usize].
     pub fn value(self) -> usize {
         self.0
     }

-    /// Returns true if the value of the variable is a constant (i.e. Top or Bot)
+    /// Returns [true] if the value of the [variable][Var] is a constant (i.e., [BOT][Var::BOT] or [TOP][Var::TOP]).
     pub fn is_constant(&self) -> bool {
         self.value() >= Var::BOT.value()
     }
 }

-/// A [BddNode] is representing one Node in the decision diagram
+/// A [BddNode] is representing one Node in the decision diagram.
 ///
 /// Intuitively this is a binary tree structure, where the diagram is allowed to
 /// pool same values to the same Node.
 #[derive(Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)]
-pub(crate) struct BddNode {
+pub struct BddNode {
     var: Var,
     lo: Term,
     hi: Term,
@@ -145,28 +157,34 @@ impl Display for BddNode {
     }
 }

+impl Default for BddNode {
+    fn default() -> Self {
+        Self::top_node()
+    }
+}
+
 impl BddNode {
-    /// Creates a new Node
+    /// Creates a new Node.
     pub fn new(var: Var, lo: Term, hi: Term) -> Self {
         Self { var, lo, hi }
     }

-    /// Returns the current Variable-value
+    /// Returns the current Variable-value.
     pub fn var(self) -> Var {
         self.var
     }

-    /// Returns the `lo`-branch
+    /// Returns the `lo`-branch.
     pub fn lo(self) -> Term {
         self.lo
     }

-    /// Returns the `hi`-branch
+    /// Returns the `hi`-branch.
     pub fn hi(self) -> Term {
         self.hi
     }

-    /// Creates a node, which represents the `Bot`-truth value
+    /// Creates a node, which represents the `Bot`-truth value.
     pub fn bot_node() -> Self {
         Self {
             var: Var::BOT,
@@ -175,7 +193,7 @@ impl BddNode {
         }
     }

-    /// Creates a node, which represents the `Top`-truth value
+    /// Creates a node, which represents the `Top`-truth value.
     pub fn top_node() -> Self {
         Self {
             var: Var::TOP,
@@ -185,22 +203,25 @@ impl BddNode {
         }
     }

-/// Type alias for the pair of counter-models and models
+/// Represents the pair of counts, related to counter-models and models.
+///
+/// A model of a formula is an interpretation such that the formula evaluates to true with respect to the interpretation.
+/// A counter-model of a formula is an interpretation such that the formula evaluates to false with respect to the interpretation.
 #[derive(Debug, Clone, Copy, Eq, PartialEq, PartialOrd, Ord)]
 pub struct ModelCounts {
-    /// Contains the number of counter-models
+    /// Contains the number of counter-models.
     pub cmodels: usize,
-    /// Contains the number of models
+    /// Contains the number of models.
     pub models: usize,
 }

 impl ModelCounts {
-    /// Represents the top-node model-counts
+    /// Represents the top-node model-counts.
     pub fn top() -> ModelCounts {
         (0, 1).into()
     }

-    /// Represents the bot-node model-counts
+    /// Represents the bot-node model-counts.
     pub fn bot() -> ModelCounts {
         (1, 0).into()
     }
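For orientation, a tiny usage sketch of `ModelCounts` and its tuple conversion; note that the tuple order is `(cmodels, models)`, matching the field order above:

```rust
use adf_bdd::datatypes::ModelCounts;

fn main() {
    let counts: ModelCounts = (1, 3).into();
    assert_eq!(counts.cmodels, 1);
    assert_eq!(counts.models, 3);

    // The two terminal nodes of the roBDD:
    assert_eq!(ModelCounts::top(), (0, 1).into());
    assert_eq!(ModelCounts::bot(), (1, 0).into());
}
```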
@@ -210,8 +231,8 @@ impl ModelCounts {
         self.models.min(self.cmodels)
     }

-    /// Returns true, if there are more models than counter-models.
-    /// If they are equal, the function returns true too.
+    /// Returns [true], if there are more models than counter-models.
+    /// If they are equal, the function returns [true] too.
     pub fn more_models(&self) -> bool {
         self.models >= self.minimum()
     }
@@ -225,9 +246,9 @@ impl From<(usize, usize)> for ModelCounts {
         }
     }
 }
-/// Type alias for the Modelcounts and the depth of a given Node in a BDD
-pub type CountNode = (ModelCounts, usize);
-/// Type alias for Facet counts, which contains number of facets and counter facets.
+/// Type alias for the [Modelcounts][ModelCounts], count of paths to ⊥ respectively ⊤, and the depth of a given node in an roBDD.
+pub type CountNode = (ModelCounts, ModelCounts, usize);
+/// Type alias for [Facet counts][FacetCounts], which contains the number of facets and counter-facets.
 pub type FacetCounts = (usize, usize);

 #[cfg(test)]
@@ -254,8 +275,8 @@ mod test {
         let term: Term = Term::from(value);
         let var = Var::from(value);
         // display
-        assert_eq!(format!("{}", term), format!("Term({})", value));
-        assert_eq!(format!("{}", var), format!("Var({})", value));
+        assert_eq!(format!("{term}"), format!("Term({})", value));
+        assert_eq!(format!("{var}"), format!("Var({})", value));
         //deref
         assert_eq!(value, *term);
         true

234 lib/src/lib.rs
@@ -1,42 +1,43 @@
 /*!
-This library contains an efficient representation of `Abstract Dialectical Frameworks (ADf)` by utilising an implementation of `Ordered Binary Decision Diagrams (OBDD)`
+This library contains an efficient representation of `Abstract Dialectical Frameworks (ADF)` by utilising an implementation of `Ordered Binary Decision Diagrams (OBDD)`

 # Abstract Dialectical Frameworks
-An `abstract dialectical framework` consists of abstract statements. Each statement has an unique label and might be related to other statements (s) in the ADF. This relation is defined by a so-called acceptance condition (ac), which intuitively is a propositional formula, where the variable symbols are the labels of the statements. An interpretation is a three valued function which maps to each statement a truth value (true, false, undecided). We call such an interpretation a model, if each acceptance condition agrees to the interpration.
-# Ordered Binary Decision Diagram
-An `ordered binary decision diagram` is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheap.
-
-Note that one advantage of this implementation is that only one oBDD is used for all acceptance conditions. This can be done because all of them have the identical signature (i.e. the set of all statements + top and bottom concepts).
-Due to this uniform representation reductions on subformulae which are shared by two or more statements only need to be computed once and is already cached in the data structure for further applications.
-
-The used algorithm to create a BDD, based on a given formula does not perform well on bigger formulae, therefore it is possible to use a state-of-the art library to instantiate the BDD (<https://github.com/sybila/biodivine-lib-bdd>).
-It is possible to either stay with the biodivine library or switch back to the variant implemented by adf-bdd.
-The variant implemented in this library offers reuse of already done reductions and memoisation techniques, which are not offered by biodivine.
-In addition some further features, like counter-model counting is not supported by biodivine.
-
-Note that import and export only works if the naive library is chosen
+An `abstract dialectical framework` consists of abstract statements. Each statement has a unique label and might be related to other statements (s) in the ADF. This relation is defined by a so-called acceptance condition (ac), which intuitively is a propositional formula, where the variable symbols are the labels of the statements. An interpretation is a three valued function which maps to each statement a truth value (true, false, undecided). We call such an interpretation a model, if each acceptance condition agrees to the interpretation.

 ## Noteworthy relations between semantics
-They can be easily identified though:
 - The computation is always in the same order
   - grd
   - com
   - stm
 - We know that there is always exactly one grounded model
-- We know that there always exist at least one complete model (i.e. the grounded one)
+- We know that there always exists at least one complete model (i.e., the grounded one)
 - We know that there does not need to exist a stable model
 - We know that every stable model is a complete model too

-# Input-file format:
-Each statement is defined by an ASP-style unary predicate s, where the enclosed term represents the label of the statement.
-The binary predicate ac relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
-- and(x,y): conjunction
-- or(x,y): disjunctin
-- iff(x,Y): if and only if
-- xor(x,y): exclusive or
-- neg(x): classical negation
-- c(v): constant symbol "verum" - tautology/top
-- c(f): constant symbol "falsum" - inconsistency/bot
+# Reduced Ordered Binary Decision Diagram (roBDD)
+A `reduced ordered binary decision diagram` is a normalised representation of binary functions, where satisfiability- and validity checks can be done relatively cheap and no redundant information is stored.
+
+Note that one advantage of this implementation is that only one structure is used for all acceptance conditions. This can be done because all of them have the identical signature (i.e., the set of all statements + top and bottom concepts).
+Due to this uniform representation reductions on subformulae which are shared by two or more statements only need to be computed once and will be cached in the data structure for further applications.
+
+The naively used algorithm to create an roBDD, based on a given formula does not perform well on bigger formulae, therefore it is possible to use a state-of-the art library to instantiate the roBDD (<https://github.com/sybila/biodivine-lib-bdd>).
+It is possible to either stay with the biodivine library or switch back to the variant implemented by adf-bdd.
+The variant implemented in this library offers reuse of already done reductions and memoisation techniques, which are not offered by biodivine.
+In addition some further features, like counter-model counting is not supported by biodivine.
+
+Note that import and export only works if the naive library is chosen.
+
+# Input-file format
+Each statement is defined by an ASP-style unary predicate `s`, where the enclosed term represents the label of the statement.
+The binary predicate `ac` relates each statement to one propositional formula in prefix notation, with the logical operations and constants as follows:
+- `and(x,y)`: conjunction
+- `or(x,y)`: disjunction
+- `iff(x,Y)`: if and only if
+- `xor(x,y)`: exclusive or
+- `neg(x)`: classical negation
+- `c(v)`: constant symbol "verum" - tautology/top
+- `c(f)`: constant symbol "falsum" - inconsistency/bot
 */

 /*!
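To tie the input format to the API: a minimal end-to-end sketch that parses an instance in the syntax above and enumerates its stable models via the biodivine-backed solver. The two-statement instance is invented, and `stable` is the iterator whose signature appears earlier in this diff.

```rust
use adf_bdd::adfbiodivine::Adf;
use adf_bdd::parser::AdfParser;

fn main() {
    // Statement a is unconditionally accepted; b is accepted iff a is not.
    let input = "s(a).s(b).ac(a,c(v)).ac(b,neg(a)).";

    let parser = AdfParser::default();
    parser.parse()(input).expect("the instance above is well-formed");

    let adf = Adf::from_parser(&parser);
    for model in adf.stable() {
        // Expected single stable model: a = TOP, b = BOT.
        println!("{:?}", model);
    }
}
```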
@@ -159,11 +160,188 @@ for model in adf.complete() {
     print!("{}", printer.print_interpretation(&model));
 }
 ```

+### Using the [`NoGood`][crate::nogoods::NoGood]-learner approach, together with the [`crossbeam-channel`] implementation
+This can be used to have a worker and a consumer thread to print the results as they are computed.
+Please note that the [`NoGood`][crate::nogoods::NoGood]-learner needs a heuristics function to work.
+The enum [`Heuristic`][crate::adf::heuristics::Heuristic] allows one to choose a pre-defined heuristic, or implement a `Custom` one.
+```rust
+use adf_bdd::parser::AdfParser;
+use adf_bdd::adf::Adf;
+use adf_bdd::adf::heuristics::Heuristic;
+use adf_bdd::datatypes::{Term, adf::VarContainer};
+// create a channel
+let (s, r) = crossbeam_channel::unbounded();
+let variables = VarContainer::default();
+let variables_worker = variables.clone();
+// spawn a solver thread
+let solving = std::thread::spawn(move || {
+    // use the above example as input
+    let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
+    let parser = AdfParser::with_var_container(variables_worker);
+    parser.parse()(&input).expect("parsing worked well");
+    // use hybrid approach
+    let mut adf = adf_bdd::adfbiodivine::Adf::from_parser(&parser).hybrid_step();
+    // compute stable with the simple heuristic
+    adf.stable_nogood_channel(Heuristic::Simple, s);
+});
+
+let printer = variables.print_dictionary();
+// print results as they are computed
+while let Ok(result) = r.recv() {
+    print!("stable model: {:?} \n", result);
+    // use dictionary
+    print!("stable model with variable names: {}", printer.print_interpretation(&result));
+    # assert_eq!(result, vec![Term(1),Term(1),Term(0),Term(0)]);
+}
+// waiting for the other thread to close
+solving.join().unwrap();
+```
+
+### Serialize and Deserialize custom datastructures representing an [`adf::Adf`]
+
+The Web Application <https://adf-bdd.dev> uses custom datastructures that are stored in a mongodb which inspired this example.
+
+```rust
+use std::sync::{Arc, RwLock};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use adf_bdd::datatypes::adf::VarContainer;
+use adf_bdd::datatypes::{BddNode, Term, Var};
+use adf_bdd::obdd::Bdd;
+use adf_bdd::parser::AdfParser;
+use adf_bdd::adf::Adf;
+
+// Custom Datastructures for (De-)Serialization
+
+# #[derive(PartialEq, Debug)]
+#[derive(Deserialize, Serialize)]
+struct MyCustomVarContainer {
+    names: Vec<String>,
+    mapping: HashMap<String, String>,
+}
+
+impl From<VarContainer> for MyCustomVarContainer {
+    fn from(source: VarContainer) -> Self {
+        Self {
+            names: source.names().read().unwrap().clone(),
+            mapping: source
+                .mappings()
+                .read()
+                .unwrap()
+                .iter()
+                .map(|(k, v)| (k.clone(), v.to_string()))
+                .collect(),
+        }
+    }
+}
+
+impl From<MyCustomVarContainer> for VarContainer {
+    fn from(source: MyCustomVarContainer) -> Self {
+        Self::from_parser(
+            Arc::new(RwLock::new(source.names)),
+            Arc::new(RwLock::new(
+                source
+                    .mapping
+                    .into_iter()
+                    .map(|(k, v)| (k, v.parse().unwrap()))
+                    .collect(),
+            )),
+        )
+    }
+}
+
+# #[derive(PartialEq, Debug)]
+#[derive(Deserialize, Serialize)]
+struct MyCustomBddNode {
+    var: String,
+    lo: String,
+    hi: String,
+}
+
+impl From<BddNode> for MyCustomBddNode {
+    fn from(source: BddNode) -> Self {
+        Self {
+            var: source.var().0.to_string(),
+            lo: source.lo().0.to_string(),
+            hi: source.hi().0.to_string(),
+        }
+    }
+}
+
+impl From<MyCustomBddNode> for BddNode {
+    fn from(source: MyCustomBddNode) -> Self {
+        Self::new(
+            Var(source.var.parse().unwrap()),
+            Term(source.lo.parse().unwrap()),
+            Term(source.hi.parse().unwrap()),
+        )
+    }
+}
+
+# #[derive(PartialEq, Debug)]
+#[derive(Deserialize, Serialize)]
+struct MyCustomAdf {
+    ordering: MyCustomVarContainer,
+    bdd: Vec<MyCustomBddNode>,
+    ac: Vec<String>,
+}
+
+impl From<Adf> for MyCustomAdf {
+    fn from(source: Adf) -> Self {
+        Self {
+            ordering: source.ordering.into(),
+            bdd: source.bdd.nodes.into_iter().map(Into::into).collect(),
+            ac: source.ac.into_iter().map(|t| t.0.to_string()).collect(),
+        }
+    }
+}
+
+impl From<MyCustomAdf> for Adf {
+    fn from(source: MyCustomAdf) -> Self {
+        let bdd = Bdd::from(source.bdd.into_iter().map(Into::into).collect::<Vec<BddNode>>());
+
+        Adf::from((
+            source.ordering.into(),
+            bdd,
+            source
+                .ac
+                .into_iter()
+                .map(|t| Term(t.parse().unwrap()))
+                .collect(),
+        ))
+    }
+}
+
+// use the above example as input
+let input = "s(a).s(b).s(c).s(d).ac(a,c(v)).ac(b,or(a,b)).ac(c,neg(b)).ac(d,d).";
+let parser = AdfParser::default();
+parser.parse()(&input).unwrap();
+
+// create Adf
+let adf = Adf::from_parser(&parser);
+
+// cast into custom struct
+let my_custom_adf: MyCustomAdf = adf.into();
+
+// stringify to json
+let json: String = serde_json::to_string(&my_custom_adf).unwrap();
+
+// parse json
+let parsed_custom_adf: MyCustomAdf = serde_json::from_str(&json).unwrap();
+
+// cast into lib struct that resembles the original Adf
+let parsed_adf: Adf = parsed_custom_adf.into();
+
+# let my_custom_adf2: MyCustomAdf = parsed_adf.into();
+# assert_eq!(my_custom_adf, my_custom_adf2);
+```

 */
 #![deny(
     missing_debug_implementations,
     missing_copy_implementations,
-    missing_copy_implementations,
     trivial_casts,
     trivial_numeric_casts,
     unsafe_code
@@ -179,8 +357,8 @@ for model in adf.complete() {
 pub mod adf;
 pub mod adfbiodivine;
 pub mod datatypes;
+pub mod nogoods;
 pub mod obdd;
 pub mod parser;
 #[cfg(test)]
 mod test;
-//pub mod obdd2;

812 lib/src/nogoods.rs Normal file
@@ -0,0 +1,812 @@
|
//! Collection of all nogood-related structures.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
fmt::{Debug, Display},
|
||||||
|
ops::{BitAnd, BitOr, BitXor, BitXorAssign},
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::datatypes::Term;
|
||||||
|
use roaring::RoaringBitmap;
|
||||||
|
|
||||||
|
/// A [NoGood] and an [Interpretation] can be represented by the same structure.
|
||||||
|
/// Moreover this duality (i.e. an [Interpretation] becomes a [NoGood] is reflected by this type alias.
|
||||||
|
pub type Interpretation = NoGood;
|
||||||
|
|
||||||
|
/// Representation of a nogood by a pair of [Bitmaps][RoaringBitmap]
|
||||||
|
#[derive(Debug, Default, Clone)]
|
||||||
|
pub struct NoGood {
|
||||||
|
active: RoaringBitmap,
|
||||||
|
value: RoaringBitmap,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Eq for NoGood {}
|
||||||
|
impl PartialEq for NoGood {
|
||||||
|
fn eq(&self, other: &Self) -> bool {
|
||||||
|
(&self.active).bitxor(&other.active).is_empty()
|
||||||
|
&& (&self.value).bitxor(&other.value).is_empty()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NoGood {
|
||||||
|
/// Creates an [Interpretation] from a given Vector of [Terms][Term].
|
||||||
|
pub fn from_term_vec(term_vec: &[Term]) -> Interpretation {
|
||||||
|
let mut result = Self::default();
|
||||||
|
term_vec.iter().enumerate().for_each(|(idx, val)| {
|
||||||
|
let idx:u32 = idx.try_into().expect("no-good learner implementation is based on the assumption that only u32::MAX-many variables are in place");
|
||||||
|
if val.is_truth_value() {
|
||||||
|
result.active.insert(idx);
|
||||||
|
if val.is_true() {
|
||||||
|
result.value.insert(idx);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a [NoGood] representing an atomic assignment.
|
||||||
|
pub fn new_single_nogood(pos: usize, val: bool) -> NoGood {
|
||||||
|
let mut result = Self::default();
|
||||||
|
let pos:u32 = pos.try_into().expect("nog-good learner implementation is based on the assumption that only u32::MAX-many variables are in place");
|
||||||
|
result.active.insert(pos);
|
||||||
|
if val {
|
||||||
|
result.value.insert(pos);
|
||||||
|
}
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns [None] if the pair contains inconsistent pairs.
|
||||||
|
/// Otherwise it returns an [Interpretation] which represents the set values.
|
||||||
|
pub fn try_from_pair_iter(
|
||||||
|
pair_iter: &mut impl Iterator<Item = (usize, bool)>,
|
||||||
|
) -> Option<Interpretation> {
|
||||||
|
let mut result = Self::default();
|
||||||
|
let mut visit = false;
|
||||||
|
for (idx, val) in pair_iter {
|
||||||
|
visit = true;
|
||||||
|
let idx:u32 = idx.try_into().expect("no-good learner implementation is based on the assumption that only u32::MAX-many variables are in place");
|
||||||
|
let is_new = result.active.insert(idx);
|
||||||
|
let upd = if val {
|
||||||
|
result.value.insert(idx)
|
||||||
|
} else {
|
||||||
|
result.value.remove(idx)
|
||||||
|
};
|
||||||
|
// if the state is not new and the value is changed
|
||||||
|
if !is_new && upd {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
visit.then_some(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates an updated [`Vec<Term>`], based on the given [&[Term]] and the [NoGood].
|
||||||
|
/// The parameter _update_ is set to [`true`] if there has been an update and to [`false`] otherwise
|
||||||
|
pub fn update_term_vec(&self, term_vec: &[Term], update: &mut bool) -> Vec<Term> {
|
||||||
|
*update = false;
|
||||||
|
term_vec
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(idx, val)| {
|
||||||
|
let idx: u32 = idx.try_into().expect(
|
||||||
|
"no-good learner implementation is based on the assumption \
|
||||||
|
that only u32::MAX-many variables are in place",
|
||||||
|
);
|
||||||
|
if self.active.contains(idx) {
|
||||||
|
if !val.is_truth_value() {
|
||||||
|
*update = true;
|
||||||
|
}
|
||||||
|
if self.value.contains(idx) {
|
||||||
|
Term::TOP
|
||||||
|
} else {
|
||||||
|
Term::BOT
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
*val
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Given a [NoGood] and another one, conclude a non-conflicting value which can be concluded on basis of the given one.
|
||||||
|
pub fn conclude(&self, other: &NoGood) -> Option<(usize, bool)> {
|
||||||
|
log::debug!("conclude: {:?} other {:?}", self, other);
|
||||||
|
let implication = (&self.active).bitxor(&other.active).bitand(&self.active);
|
||||||
|
|
||||||
|
let bothactive = (&self.active).bitand(&other.active);
|
||||||
|
let mut no_matches = (&bothactive).bitand(&other.value);
|
||||||
|
no_matches.bitxor_assign(bothactive.bitand(&self.value));
|
||||||
|
|
||||||
|
if implication.len() == 1 && no_matches.is_empty() {
|
||||||
|
let pos = implication
|
||||||
|
.min()
|
||||||
|
.expect("just checked that there is one element to be found");
|
||||||
|
log::trace!(
|
||||||
|
"Conclude {:?}",
|
||||||
|
Some((pos as usize, !self.value.contains(pos)))
|
||||||
|
);
|
||||||
|
Some((pos as usize, !self.value.contains(pos)))
|
||||||
|
} else {
|
||||||
|
log::trace!("Nothing to Conclude");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Updates the [NoGood] and a second one in a disjunctive (bitor) manner.
|
||||||
|
pub fn disjunction(&mut self, other: &NoGood) {
|
||||||
|
self.active = (&self.active).bitor(&other.active);
|
||||||
|
self.value = (&self.value).bitor(&other.value);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns [true] if the other [Interpretation] matches with all the assignments of the current [NoGood].
|
||||||
|
pub fn is_violating(&self, other: &Interpretation) -> bool {
|
||||||
|
let active = (&self.active).bitand(&other.active);
|
||||||
|
if self.active.len() == active.len() {
|
||||||
|
let lhs = (&active).bitand(&self.value);
|
||||||
|
let rhs = (&active).bitand(&other.value);
|
||||||
|
if lhs.bitxor(rhs).is_empty() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the number of set (i.e. active) bits.
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.active
|
||||||
|
.len()
|
||||||
|
.try_into()
|
||||||
|
.expect("expecting to be on a 64 bit system")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[must_use]
|
||||||
|
/// Returns [true] if the [NoGood] does not set any value.
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.len() == 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<&[Term]> for NoGood {
|
||||||
|
fn from(term_vec: &[Term]) -> Self {
|
||||||
|
Self::from_term_vec(term_vec)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A structure to store [NoGoods][NoGood] and offer operations and deductions based on them.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct NoGoodStore {
|
||||||
|
store: Vec<Vec<NoGood>>,
|
||||||
|
duplicates: DuplicateElemination,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for NoGoodStore {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
writeln!(f, "NoGoodStats: [")?;
|
||||||
|
for (arity, vec) in self.store.iter().enumerate() {
|
||||||
|
writeln!(f, "{arity}: {}", vec.len())?;
|
||||||
|
log::debug!("Nogoods:\n {:?}", vec);
|
||||||
|
}
|
||||||
|
write!(f, "]")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NoGoodStore {
|
||||||
|
/// Creates a new [NoGoodStore] and assumes a size compatible with the underlying [NoGood] implementation.
|
||||||
|
pub fn new(size: u32) -> NoGoodStore {
|
||||||
|
Self {
|
||||||
|
store: vec![Vec::new(); size as usize],
|
||||||
|
duplicates: DuplicateElemination::Equiv,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tries to create a new [NoGoodStore].
|
||||||
|
/// Does not succeed if the size is too big for the underlying [NoGood] implementation.
|
||||||
|
pub fn try_new(size: usize) -> Option<NoGoodStore> {
|
||||||
|
Some(Self::new(size.try_into().ok()?))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets the behaviour when managing duplicates.
|
||||||
|
pub fn set_dup_elem(&mut self, mode: DuplicateElemination) {
|
||||||
|
self.duplicates = mode;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a given [NoGood]
|
||||||
|
pub fn add_ng(&mut self, nogood: NoGood) {
|
||||||
|
let mut idx = nogood.len();
|
||||||
|
if idx > 0 {
|
||||||
|
idx -= 1;
|
||||||
|
if match self.duplicates {
|
||||||
|
DuplicateElemination::None => true,
|
||||||
|
DuplicateElemination::Equiv => !self.store[idx].contains(&nogood),
|
||||||
|
DuplicateElemination::Subsume => {
|
||||||
|
self.store
|
||||||
|
.iter_mut()
|
||||||
|
.enumerate()
|
||||||
|
.for_each(|(cur_idx, ng_vec)| {
|
||||||
|
if idx >= cur_idx {
|
||||||
|
ng_vec.retain(|ng| !ng.is_violating(&nogood));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
true
|
||||||
|
}
|
||||||
|
} {
|
||||||
|
self.store[idx].push(nogood);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Draws a (Conclusion)[NoGood], based on the [NoGoodStore] and the given [NoGood].
|
||||||
|
/// *Returns* [None] if there is a conflict
|
||||||
|
pub fn conclusions(&self, nogood: &NoGood) -> Option<NoGood> {
|
||||||
|
let mut result = nogood.clone();
|
||||||
|
log::trace!("ng-store: {:?}", self.store);
|
||||||
|
self.store
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.filter(|(len, _vec)| *len <= nogood.len())
|
||||||
|
.filter_map(|(_len, val)| {
|
||||||
|
NoGood::try_from_pair_iter(&mut val.iter().filter_map(|ng| ng.conclude(nogood)))
|
||||||
|
})
|
||||||
|
.try_fold(&mut result, |acc, ng| {
|
||||||
|
if ng.is_violating(acc) {
|
||||||
|
log::trace!("ng conclusion violating");
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
acc.disjunction(&ng);
|
||||||
|
Some(acc)
|
||||||
|
}
|
||||||
|
})?;
|
||||||
|
if self
|
||||||
|
.store
|
||||||
|
.iter()
|
||||||
|
.enumerate()
|
||||||
|
.filter(|(len, _vec)| *len <= nogood.len())
|
||||||
|
.any(|(_, vec)| {
|
||||||
|
vec.iter()
|
||||||
|
.any(|elem| elem.is_violating(&result) || elem.is_violating(nogood))
|
||||||
|
})
|
||||||
|
{
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Constructs the Closure of the conclusions drawn by the nogoods with respect to the given `interpretation`
|
||||||
|
pub(crate) fn conclusion_closure(&self, interpretation: &[Term]) -> ClosureResult {
|
||||||
|
let mut update = true;
|
||||||
|
let mut result = match self.conclusions(&interpretation.into()) {
|
||||||
|
Some(val) => {
|
||||||
|
log::trace!(
|
||||||
|
"conclusion-closure step 1: val:{:?} -> {:?}",
|
||||||
|
val,
|
||||||
|
val.update_term_vec(interpretation, &mut update)
|
||||||
|
);
|
||||||
|
val.update_term_vec(interpretation, &mut update)
|
||||||
|
}
|
||||||
|
|
||||||
|
None => return ClosureResult::Inconsistent,
|
||||||
|
};
|
||||||
|
if !update {
|
||||||
|
return ClosureResult::NoUpdate;
|
||||||
|
}
|
||||||
|
while update {
|
||||||
|
match self.conclusions(&result.as_slice().into()) {
|
||||||
|
Some(val) => result = val.update_term_vec(&result, &mut update),
|
||||||
|
None => return ClosureResult::Inconsistent,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ClosureResult::Update(result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Allows to define how costly the DuplicateElemination is done.
|
||||||
|
#[derive(Debug, Copy, Clone)]
|
||||||
|
pub enum DuplicateElemination {
|
||||||
|
/// No Duplicate Detection
|
||||||
|
None,
|
||||||
|
/// Only check weak equivalence
|
||||||
|
Equiv,
|
||||||
|
/// Check for subsumptions
|
||||||
|
Subsume,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// If the closure had some issues, it is represented with this enum
|
||||||
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
|
pub(crate) enum ClosureResult {
|
||||||
|
Update(Vec<Term>),
|
||||||
|
NoUpdate,
|
||||||
|
Inconsistent,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClosureResult {
|
||||||
|
/// Dead_code due to (currently) unused utility function for the [ClosureResult] enum.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub fn is_update(&self) -> bool {
|
||||||
|
matches!(self, Self::Update(_))
|
||||||
|
}
|
||||||
|
/// Dead_code due to (currently) unused utility function for the [ClosureResult] enum.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub fn is_no_update(&self) -> bool {
|
||||||
|
matches!(self, Self::NoUpdate)
|
||||||
|
}
|
||||||
|
/// Dead_code due to (currently) unused utility function for the [ClosureResult] enum.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub fn is_inconsistent(&self) -> bool {
|
||||||
|
matches!(self, Self::Inconsistent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryInto<Vec<Term>> for ClosureResult {
|
||||||
|
type Error = &'static str;
|
||||||
|
|
||||||
|
fn try_into(self) -> Result<Vec<Term>, Self::Error> {
|
||||||
|
match self {
|
||||||
|
ClosureResult::Update(val) => Ok(val),
|
||||||
|
ClosureResult::NoUpdate => Err("No update occurred, use the old value instead"),
|
||||||
|
ClosureResult::Inconsistent => Err("Inconsistency occurred"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::*;
|
||||||
|
use test_log::test;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn create_ng() {
|
||||||
|
let terms = vec![Term::TOP, Term(22), Term(13232), Term::BOT, Term::TOP];
|
||||||
|
let ng = NoGood::from_term_vec(&terms);
|
||||||
|
|
||||||
|
assert_eq!(ng.active.len(), 3);
|
||||||
|
assert_eq!(ng.value.len(), 2);
|
||||||
|
assert!(ng.active.contains(0));
|
||||||
|
assert!(!ng.active.contains(1));
|
||||||
|
assert!(!ng.active.contains(2));
|
||||||
|
assert!(ng.active.contains(3));
|
||||||
|
assert!(ng.active.contains(4));
|
||||||
|
|
||||||
|
assert!(ng.value.contains(0));
|
||||||
|
assert!(!ng.value.contains(1));
|
||||||
|
assert!(!ng.value.contains(2));
|
||||||
|
assert!(!ng.value.contains(3));
|
||||||
|
assert!(ng.value.contains(4));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn conclude() {
|
||||||
|
let ng1 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term::TOP, Term::BOT, Term::TOP]);
|
||||||
|
let ng2 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term(13232), Term::BOT, Term::TOP]);
|
||||||
|
let ng3 = NoGood::from_term_vec(&[
|
||||||
|
Term::TOP,
|
||||||
|
Term(22),
|
||||||
|
Term(13232),
|
||||||
|
Term::BOT,
|
||||||
|
Term::TOP,
|
||||||
|
Term::BOT,
|
||||||
|
]);
|
||||||
|
|
||||||
|
assert_eq!(ng1.conclude(&ng2), Some((2, false)));
|
||||||
|
assert_eq!(ng1.conclude(&ng1), None);
|
||||||
|
assert_eq!(ng2.conclude(&ng1), None);
|
||||||
|
assert_eq!(ng1.conclude(&ng3), Some((2, false)));
|
||||||
|
assert_eq!(ng3.conclude(&ng1), Some((5, true)));
|
||||||
|
assert_eq!(ng3.conclude(&ng2), Some((5, true)));
|
||||||
|
|
||||||
|
// conclusions on empty knowledge
|
||||||
|
let ng4 = NoGood::from_term_vec(&[Term::TOP]);
|
||||||
|
let ng5 = NoGood::from_term_vec(&[Term::BOT]);
|
||||||
|
let ng6 = NoGood::from_term_vec(&[]);
|
||||||
|
|
||||||
|
assert_eq!(ng4.conclude(&ng6), Some((0, false)));
|
||||||
|
assert_eq!(ng5.conclude(&ng6), Some((0, true)));
|
||||||
|
assert_eq!(ng6.conclude(&ng5), None);
|
||||||
|
assert_eq!(ng4.conclude(&ng5), None);
|
||||||
|
|
||||||
|
let ng_a = NoGood::from_term_vec(&[Term::BOT, Term(22)]);
|
||||||
|
let ng_b = NoGood::from_term_vec(&[Term(22), Term::TOP]);
|
||||||
|
|
||||||
|
assert_eq!(ng_a.conclude(&ng_b), Some((0, true)));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn violate() {
|
||||||
|
let ng1 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term::TOP, Term::BOT, Term::TOP]);
|
||||||
|
let ng2 = NoGood::from_term_vec(&[Term::TOP, Term(22), Term(13232), Term::BOT, Term::TOP]);
|
||||||
|
let ng3 = NoGood::from_term_vec(&[
|
||||||
|
Term::TOP,
|
||||||
|
Term(22),
|
||||||
|
Term(13232),
|
||||||
|
Term::BOT,
|
||||||
|
Term::TOP,
|
||||||
|
Term::BOT,
|
||||||
|
]);
|
||||||
|
let ng4 = NoGood::from_term_vec(&[Term::TOP]);
|
||||||
|
|
||||||
|
assert!(ng4.is_violating(&ng1));
|
||||||
|
assert!(!ng1.is_violating(&ng4));
|
||||||
|
assert!(ng2.is_violating(&ng3));
|
||||||
|
assert!(!ng3.is_violating(&ng2));
|
||||||
|
|
||||||
|
assert_eq!(ng4, NoGood::new_single_nogood(0, true));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn add_ng() {
|
||||||
|
let mut ngs = NoGoodStore::new(5);
|
||||||
|
let ng1 = NoGood::from_term_vec(&[Term::TOP]);
|
||||||
|
let ng2 = NoGood::from_term_vec(&[Term(22), Term::TOP]);
|
||||||
|
let ng3 = NoGood::from_term_vec(&[Term(22), Term(22), Term::TOP]);
|
||||||
|
let ng4 = NoGood::from_term_vec(&[Term(22), Term(22), Term(22), Term::TOP]);
|
||||||
|
let ng5 = NoGood::from_term_vec(&[Term::BOT]);
|
||||||
|
|
||||||
|
assert!(!ng1.is_violating(&ng5));
|
||||||
|
assert!(ng1.is_violating(&ng1));
|
||||||
|
|
||||||
|
ngs.add_ng(ng1.clone());
|
||||||
|
ngs.add_ng(ng2.clone());
|
||||||
|
ngs.add_ng(ng3.clone());
|
||||||
|
ngs.add_ng(ng4.clone());
|
||||||
|
ngs.add_ng(ng5.clone());
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
ngs.store
|
||||||
|
.iter()
|
||||||
|
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
|
||||||
|
5
|
||||||
|
);
|
||||||
|
|
||||||
|
ngs.set_dup_elem(DuplicateElemination::Equiv);
|
||||||
|
|
||||||
|
ngs.add_ng(ng1.clone());
|
||||||
|
ngs.add_ng(ng2.clone());
|
||||||
|
ngs.add_ng(ng3.clone());
|
||||||
|
ngs.add_ng(ng4.clone());
|
||||||
|
ngs.add_ng(ng5.clone());
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
ngs.store
|
||||||
|
.iter()
|
||||||
|
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
|
||||||
|
5
|
||||||
|
);
|
||||||
|
ngs.set_dup_elem(DuplicateElemination::Subsume);
|
||||||
|
ngs.add_ng(ng1);
|
||||||
|
ngs.add_ng(ng2);
|
||||||
|
ngs.add_ng(ng3);
|
||||||
|
ngs.add_ng(ng4);
|
||||||
|
ngs.add_ng(ng5);
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
ngs.store
|
||||||
|
.iter()
|
||||||
|
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
|
||||||
|
5
|
||||||
|
);
|
||||||
|
|
||||||
|
ngs.add_ng(NoGood::from_term_vec(&[Term(22), Term::BOT, Term(22)]));
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
ngs.store
|
||||||
|
.iter()
|
||||||
|
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
|
||||||
|
6
|
||||||
|
);
|
||||||
|
|
||||||
|
ngs.add_ng(NoGood::from_term_vec(&[Term(22), Term::BOT, Term::BOT]));
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
ngs.store
|
||||||
|
.iter()
|
||||||
|
.fold(0, |acc, ng_vec| { acc + ng_vec.len() }),
|
||||||
|
6
|
||||||
|
);
|
||||||
|
|
||||||
|
assert!(NoGood::from_term_vec(&[Term(22), Term::BOT, Term(22)])
|
||||||
|
.is_violating(&NoGood::from_term_vec(&[Term(22), Term::BOT, Term::BOT])));
|
||||||
|
}
    #[test]
    fn ng_store_conclusions() {
        let mut ngs = NoGoodStore::new(5);

        let ng1 = NoGood::from_term_vec(&[Term::BOT]);

        ngs.add_ng(ng1.clone());
        assert_eq!(ng1.conclude(&ng1), None);
        assert_eq!(
            ng1.conclude(&NoGood::from_term_vec(&[Term(33)])),
            Some((0, true))
        );
        assert_eq!(ngs.conclusions(&ng1), None);
        assert_ne!(ngs.conclusions(&NoGood::from_term_vec(&[Term(33)])), None);
        assert_eq!(
            ngs.conclusions(&NoGood::from_term_vec(&[Term(33)]))
                .expect("just checked with prev assertion")
                .update_term_vec(&[Term(33)], &mut false),
            vec![Term::TOP]
        );

        let ng2 = NoGood::from_term_vec(&[Term(123), Term::TOP, Term(234), Term(345)]);
        let ng3 = NoGood::from_term_vec(&[Term::TOP, Term::BOT, Term::TOP, Term(345)]);

        ngs.add_ng(ng2);
        ngs.add_ng(ng3);

        log::debug!("issues start here");
        assert!(ngs
            .conclusions(&NoGood::from_term_vec(&[Term::TOP]))
            .is_some());
        assert_eq!(
            ngs.conclusions(&[Term::TOP].as_slice().into())
                .expect("just checked with prev assertion")
                .update_term_vec(&[Term::TOP, Term(4), Term(5), Term(6), Term(7)], &mut false),
            vec![Term::TOP, Term::BOT, Term(5), Term(6), Term(7)]
        );
        assert!(ngs
            .conclusions(&NoGood::from_term_vec(&[
                Term::TOP,
                Term::BOT,
                Term(5),
                Term(6),
                Term(7)
            ]))
            .is_some());

        ngs = NoGoodStore::new(10);
        ngs.add_ng([Term::BOT].as_slice().into());
        ngs.add_ng(
            [Term::TOP, Term::BOT, Term(33), Term::TOP]
                .as_slice()
                .into(),
        );
        ngs.add_ng(
            [Term::TOP, Term::BOT, Term(33), Term(33), Term::BOT]
                .as_slice()
                .into(),
        );
        ngs.add_ng([Term::TOP, Term::TOP].as_slice().into());

        let interpr: Vec<Term> = vec![
            Term(123),
            Term(233),
            Term(345),
            Term(456),
            Term(567),
            Term(678),
            Term(789),
            Term(899),
            Term(999),
            Term(1000),
        ];
        let concl = ngs.conclusions(&interpr.as_slice().into());
        assert_eq!(concl, Some(NoGood::from_term_vec(&[Term::TOP])));
        let mut update = false;
        let new_interpr = concl
            .expect("just tested in assert")
            .update_term_vec(&interpr, &mut update);
        assert_eq!(
            new_interpr,
            vec![
                Term::TOP,
                Term(233),
                Term(345),
                Term(456),
                Term(567),
                Term(678),
                Term(789),
                Term(899),
                Term(999),
                Term(1000)
            ]
        );
        assert!(update);

        let new_int_2 = ngs
            .conclusions(&new_interpr.as_slice().into())
            .map(|val| val.update_term_vec(&new_interpr, &mut update))
            .expect("Should return a value");
        assert_eq!(
            new_int_2,
            vec![
                Term::TOP,
                Term::BOT,
                Term(345),
                Term(456),
                Term(567),
                Term(678),
                Term(789),
                Term(899),
                Term(999),
                Term(1000)
            ]
        );
        assert!(update);

        let new_int_3 = ngs
            .conclusions(&new_int_2.as_slice().into())
            .map(|val| val.update_term_vec(&new_int_2, &mut update))
            .expect("Should return a value");

        assert_eq!(
            new_int_3,
            vec![
                Term::TOP,
                Term::BOT,
                Term(345),
                Term::BOT,
                Term::TOP,
                Term(678),
                Term(789),
                Term(899),
                Term(999),
                Term(1000)
            ]
        );
        assert!(update);

        let concl4 = ngs.conclusions(&new_int_3.as_slice().into());
        assert_ne!(concl4, None);

        let new_int_4 = ngs
            .conclusions(&new_int_3.as_slice().into())
            .map(|val| val.update_term_vec(&new_int_3, &mut update))
            .expect("Should return a value");

        assert_eq!(
            new_int_4,
            vec![
                Term::TOP,
                Term::BOT,
                Term(345),
                Term::BOT,
                Term::TOP,
                Term(678),
                Term(789),
                Term(899),
                Term(999),
                Term(1000)
            ]
        );
        assert!(!update);

        // inconsistence
        let interpr = vec![
            Term::TOP,
            Term::TOP,
            Term::BOT,
            Term::BOT,
            Term(111),
            Term(678),
            Term(789),
            Term(899),
            Term(999),
            Term(1000),
        ];

        assert_eq!(ngs.conclusions(&interpr.as_slice().into()), None);

        ngs = NoGoodStore::new(6);
        ngs.add_ng(
            [Term(1), Term(1), Term(1), Term(0), Term(0), Term(1)]
                .as_slice()
                .into(),
        );
        ngs.add_ng(
            [Term(1), Term(1), Term(8), Term(0), Term(0), Term(11)]
                .as_slice()
                .into(),
        );
        ngs.add_ng([Term(22), Term(1)].as_slice().into());

        assert_eq!(
            ngs.conclusions(
                &[Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)]
                    .as_slice()
                    .into(),
            ),
            Some(NoGood::from_term_vec(&[
                Term(1),
                Term(0),
                Term(3),
                Term(9),
                Term(0),
                Term(1)
            ]))
        );
    }
    #[test]
    fn conclusion_closure() {
        let mut ngs = NoGoodStore::new(10);
        ngs.add_ng([Term::BOT].as_slice().into());
        ngs.add_ng(
            [Term::TOP, Term::BOT, Term(33), Term::TOP]
                .as_slice()
                .into(),
        );
        ngs.add_ng(
            [Term::TOP, Term::BOT, Term(33), Term(33), Term::BOT]
                .as_slice()
                .into(),
        );
        ngs.add_ng([Term::TOP, Term::TOP].as_slice().into());

        let interpr: Vec<Term> = vec![
            Term(123),
            Term(233),
            Term(345),
            Term(456),
            Term(567),
            Term(678),
            Term(789),
            Term(899),
            Term(999),
            Term(1000),
        ];

        let result = ngs.conclusion_closure(&interpr);
        assert!(result.is_update());
        let resultint: Vec<Term> = result.try_into().expect("just checked conversion");
        assert_eq!(
            resultint,
            vec![
                Term::TOP,
                Term::BOT,
                Term(345),
                Term::BOT,
                Term::TOP,
                Term(678),
                Term(789),
                Term(899),
                Term(999),
                Term(1000)
            ]
        );
        let result_no_upd = ngs.conclusion_closure(&resultint);

        assert!(result_no_upd.is_no_update());
        assert_eq!(
            <ClosureResult as TryInto<Vec<Term>>>::try_into(result_no_upd)
                .expect_err("just checked that it is an error"),
            "No update occurred, use the old value instead"
        );

        let inconsistent_interpr = vec![
            Term::TOP,
            Term::TOP,
            Term::BOT,
            Term::BOT,
            Term(111),
            Term(678),
            Term(789),
            Term(899),
            Term(999),
            Term(1000),
        ];
        let result_inconsistent = ngs.conclusion_closure(&inconsistent_interpr);

        assert!(result_inconsistent.is_inconsistent());
        assert_eq!(
            <ClosureResult as TryInto<Vec<Term>>>::try_into(result_inconsistent)
                .expect_err("just checked that it is an error"),
            "Inconsistency occurred"
        );

        ngs = NoGoodStore::new(6);
        ngs.add_ng(
            [Term(1), Term(1), Term(1), Term(0), Term(0), Term(1)]
                .as_slice()
                .into(),
        );
        ngs.add_ng(
            [Term(1), Term(1), Term(8), Term(0), Term(0), Term(11)]
                .as_slice()
                .into(),
        );
        ngs.add_ng([Term(22), Term(1)].as_slice().into());

        assert_eq!(
            ngs.conclusion_closure(&[Term(1), Term(3), Term(3), Term(9), Term(0), Term(1)]),
            ClosureResult::Update(vec![Term(1), Term(0), Term(3), Term(9), Term(0), Term(1)])
        );
    }
}
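The closure tests above drive `conclusions` by hand until nothing changes; the loop below is a minimal sketch of that fixpoint iteration, using only the crate-internal `NoGoodStore` API these tests exercise (the helper name `propagate` is illustrative). `conclusion_closure` packages the same loop into a `ClosureResult`.

    // Sketch: repeatedly apply conclusions until a fixpoint is reached.
    // As in the tests above, `None` from `conclusions` signals an inconsistency.
    fn propagate(ngs: &NoGoodStore, start: Vec<Term>) -> Option<Vec<Term>> {
        let mut interpretation = start;
        loop {
            let mut update = false;
            let concl = ngs.conclusions(&interpretation.as_slice().into())?;
            let next = concl.update_term_vec(&interpretation, &mut update);
            if !update {
                return Some(next); // no further refinement possible
            }
            interpretation = next;
        }
    }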
411 lib/src/obdd.rs
@@ -1,13 +1,20 @@
-//! Represents an obdd
+//! Module which represents obdds.
+//!
+#[cfg(feature = "frontend")]
+pub mod frontend;
 pub mod vectorize;
 use crate::datatypes::*;
 use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
 use std::{cell::RefCell, cmp::min, collections::HashMap, fmt::Display};

+/// Contains the data of (possibly) multiple roBDDs, managed over one collection of nodes.
+/// It has a couple of methods to instantiate, update, and query properties on a given roBDD.
+/// Each roBDD is identified by its corresponding [`Term`], which implicitly identifies the root node of a roBDD.
 #[derive(Debug, Serialize, Deserialize)]
-pub(crate) struct Bdd {
-    pub(crate) nodes: Vec<BddNode>,
+pub struct Bdd {
+    /// The nodes of the [`Bdd`] with their edges
+    pub nodes: Vec<BddNode>,
     #[cfg(feature = "variablelist")]
     #[serde(skip)]
     var_deps: Vec<HashSet<Var>>,
@@ -15,6 +22,16 @@ pub(crate) struct Bdd {
     cache: HashMap<BddNode, Term>,
     #[serde(skip, default = "Bdd::default_count_cache")]
     count_cache: RefCell<HashMap<Term, CountNode>>,
+    #[cfg(feature = "frontend")]
+    #[serde(skip)]
+    sender: Option<crossbeam_channel::Sender<BddNode>>,
+    #[cfg(feature = "frontend")]
+    #[serde(skip)]
+    receiver: Option<crossbeam_channel::Receiver<BddNode>>,
+    #[serde(skip)]
+    ite_cache: HashMap<(Term, Term, Term), Term>,
+    #[serde(skip)]
+    restrict_cache: HashMap<(Term, Var, bool), Term>,
 }

 impl Display for Bdd {
@@ -27,7 +44,27 @@ impl Display for Bdd {
     }
 }
+
+impl Default for Bdd {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl From<Vec<BddNode>> for Bdd {
+    fn from(nodes: Vec<BddNode>) -> Self {
+        let mut bdd = Self::new();
+
+        for node in nodes {
+            bdd.node(node.var(), node.lo(), node.hi());
+        }
+
+        bdd
+    }
+}
+
 impl Bdd {
+    /// Instantiate a new roBDD structure.
+    /// Constants for the [`⊤`][crate::datatypes::Term::TOP] and [`⊥`][crate::datatypes::Term::BOT] concepts are prepared in that step too.
     pub fn new() -> Self {
         #[cfg(not(feature = "adhoccounting"))]
         {
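The new `From<Vec<BddNode>>` conversion replays a node list through `Bdd::node`, which deduplicates, so an imported list yields a reduced diagram again. A small sketch (the node values are illustrative; `BddNode::new` is used the same way in the frontend tests further below):

    // Terms 0 and 1 are the prepared ⊥ and ⊤ constants.
    let nodes = vec![
        BddNode::new(Var(0), Term(0), Term(1)),
        BddNode::new(Var(1), Term(0), Term(1)),
    ];
    let bdd: Bdd = nodes.into();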
@@ -37,6 +74,12 @@ impl Bdd {
                 var_deps: vec![HashSet::new(), HashSet::new()],
                 cache: HashMap::new(),
                 count_cache: RefCell::new(HashMap::new()),
+                #[cfg(feature = "frontend")]
+                sender: None,
+                #[cfg(feature = "frontend")]
+                receiver: None,
+                ite_cache: HashMap::new(),
+                restrict_cache: HashMap::new(),
             }
         }
         #[cfg(feature = "adhoccounting")]
@@ -47,15 +90,21 @@ impl Bdd {
                 var_deps: vec![HashSet::new(), HashSet::new()],
                 cache: HashMap::new(),
                 count_cache: RefCell::new(HashMap::new()),
+                #[cfg(feature = "frontend")]
+                sender: None,
+                #[cfg(feature = "frontend")]
+                receiver: None,
+                ite_cache: HashMap::new(),
+                restrict_cache: HashMap::new(),
             };
             result
                 .count_cache
                 .borrow_mut()
-                .insert(Term::TOP, (ModelCounts::top(), 0));
+                .insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
             result
                 .count_cache
                 .borrow_mut()
-                .insert(Term::BOT, (ModelCounts::bot(), 0));
+                .insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
             result
         }
     }
@@ -64,10 +113,12 @@ impl Bdd {
         RefCell::new(HashMap::new())
     }

+    /// Instantiates a [variable][crate::datatypes::Var] and returns the representing roBDD as a [`Term`][crate::datatypes::Term].
     pub fn variable(&mut self, var: Var) -> Term {
         self.node(var, Term::BOT, Term::TOP)
     }

+    /// Instantiates a constant, which is either [true] or [false].
     pub fn constant(val: bool) -> Term {
         if val {
             Term::TOP
@@ -76,35 +127,41 @@ impl Bdd {
         }
     }

+    /// Returns an roBDD, which represents the negation of the given roBDD.
     pub fn not(&mut self, term: Term) -> Term {
         self.if_then_else(term, Term::BOT, Term::TOP)
     }

+    /// Returns an roBDD, which represents the conjunction of the two given roBDDs.
     pub fn and(&mut self, term_a: Term, term_b: Term) -> Term {
         self.if_then_else(term_a, term_b, Term::BOT)
     }

+    /// Returns an roBDD, which represents the disjunction of the two given roBDDs.
     pub fn or(&mut self, term_a: Term, term_b: Term) -> Term {
         self.if_then_else(term_a, Term::TOP, term_b)
     }

+    /// Returns an roBDD, which represents the implication of the two given roBDDs.
     pub fn imp(&mut self, term_a: Term, term_b: Term) -> Term {
         self.if_then_else(term_a, term_b, Term::TOP)
     }

+    /// Returns an roBDD, which represents the if and only if relation of the two given roBDDs.
     pub fn iff(&mut self, term_a: Term, term_b: Term) -> Term {
         let not_b = self.not(term_b);
         self.if_then_else(term_a, term_b, not_b)
     }

+    /// Returns an roBDD, which represents the exclusive disjunction of the two given roBDDs.
     pub fn xor(&mut self, term_a: Term, term_b: Term) -> Term {
         let not_b = self.not(term_b);
         self.if_then_else(term_a, not_b, term_b)
     }

-    /// Computes the interpretations represented in the reduced BDD, which are either models or none.
-    /// *goal_var* is the variable to which the BDD is related to and it is ensured that the goal is consistent with the respective interpretation
-    /// *goal* is a boolean variable, which defines whether the models or inconsistent interpretations are of interest
+    /// Computes the interpretations represented in the roBDD, which are either models or counter-models.
+    /// **goal_var** is the [variable][Var] to which the roBDD is related to and it is ensured that the goal is consistent with the respective interpretation.
+    /// **goal** is a boolean [variable][Var], which defines whether the models or counter-models are of interest.
     pub fn interpretations(
         &self,
         tree: Term,
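All of these connectives reduce to `if_then_else`, e.g. `and(a, b)` is `ite(a, b, ⊥)` and `xor(a, b)` is `ite(a, ¬b, b)`. A short usage sketch, mirroring the crate's own tests:

    let mut bdd = Bdd::new();
    let v1 = bdd.variable(Var(0));
    let v2 = bdd.variable(Var(1));
    let v3 = bdd.variable(Var(2));
    let conj = bdd.and(v1, v2);   // v1 ∧ v2
    let form = bdd.or(conj, v3);  // (v1 ∧ v2) ∨ v3
    let diff = bdd.xor(v1, v2);   // v1 ⊕ v2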
@@ -154,31 +211,43 @@ impl Bdd {
         result
     }

+    /// Restrict the value of a given [variable][crate::datatypes::Var] to **val**.
     pub fn restrict(&mut self, tree: Term, var: Var, val: bool) -> Term {
-        let node = self.nodes[tree.0];
-        #[cfg(feature = "variablelist")]
-        {
-            if !self.var_deps[tree.value()].contains(&var) {
-                return tree;
-            }
-        }
-        #[allow(clippy::collapsible_else_if)]
-        // Readability of algorithm > code-elegance
-        if node.var() > var || node.var() >= Var::BOT {
-            tree
-        } else if node.var() < var {
-            let lonode = self.restrict(node.lo(), var, val);
-            let hinode = self.restrict(node.hi(), var, val);
-            self.node(node.var(), lonode, hinode)
-        } else {
-            if val {
-                self.restrict(node.hi(), var, val)
+        if let Some(result) = self.restrict_cache.get(&(tree, var, val)) {
+            *result
+        } else {
+            let node = self.nodes[tree.0];
+            #[cfg(feature = "variablelist")]
+            {
+                if !self.var_deps[tree.value()].contains(&var) {
+                    return tree;
+                }
+            }
+            #[allow(clippy::collapsible_else_if)]
+            // Readability of algorithm > code-elegance
+            if node.var() > var || node.var() >= Var::BOT {
+                tree
+            } else if node.var() < var {
+                let lonode = self.restrict(node.lo(), var, val);
+                let hinode = self.restrict(node.hi(), var, val);
+                let result = self.node(node.var(), lonode, hinode);
+                self.restrict_cache.insert((tree, var, val), result);
+                result
             } else {
-                self.restrict(node.lo(), var, val)
+                if val {
+                    let result = self.restrict(node.hi(), var, val);
+                    self.restrict_cache.insert((tree, var, val), result);
+                    result
+                } else {
+                    let result = self.restrict(node.lo(), var, val);
+                    self.restrict_cache.insert((tree, var, val), result);
+                    result
+                }
             }
         }
     }

+    /// Creates an roBDD, based on the relation of three roBDDs, which are in an `if-then-else` relation.
     fn if_then_else(&mut self, i: Term, t: Term, e: Term) -> Term {
         if i == Term::TOP {
             t
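`restrict` now consults `restrict_cache` before recursing and records every computed subresult, so repeated restrictions of the same subtree become lookups; the observable behaviour is unchanged. A sketch mirroring the assertions in the frontend tests:

    let mut bdd = Bdd::new();
    let v1 = bdd.variable(Var(0));
    let v2 = bdd.variable(Var(1));
    let conj = bdd.and(v1, v2);
    // Fixing Var(0) to false collapses the conjunction to ⊥;
    // fixing it to true leaves v2. A second call hits the cache.
    assert_eq!(bdd.restrict(conj, Var(0), false), Term::BOT);
    assert_eq!(bdd.restrict(conj, Var(0), true), v2);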
@@ -188,7 +257,10 @@ impl Bdd {
             t
         } else if t == Term::TOP && e == Term::BOT {
             i
+        } else if let Some(result) = self.ite_cache.get(&(i, t, e)) {
+            *result
         } else {
+            log::trace!("if_then_else: i {i} t {t} e {e} not found");
             let minvar = Var(min(
                 self.nodes[i.value()].var().value(),
                 min(
@@ -205,9 +277,14 @@ impl Bdd {

             let top_ite = self.if_then_else(itop, ttop, etop);
             let bot_ite = self.if_then_else(ibot, tbot, ebot);
-            self.node(minvar, bot_ite, top_ite)
+            let result = self.node(minvar, bot_ite, top_ite);
+            self.ite_cache.insert((i, t, e), result);
+            result
         }
     }

+    /// Creates a new node in the roBDD.
+    /// It will not create duplicate nodes and uses already existing nodes, if applicable.
     pub fn node(&mut self, var: Var, lo: Term, hi: Term) -> Term {
         if lo == hi {
             lo
@@ -219,6 +296,15 @@ impl Bdd {
             let new_term = Term(self.nodes.len());
             self.nodes.push(node);
             self.cache.insert(node, new_term);
+            #[cfg(feature = "frontend")]
+            if let Some(send) = &self.sender {
+                match send.send(node) {
+                    Ok(_) => log::trace!("Sent {node} to the channel."),
+                    Err(e) => {
+                        log::error!("Error {e} occurred when sending {node} to {:?}", send)
+                    }
+                }
+            }
             #[cfg(feature = "variablelist")]
             {
                 let mut var_set: HashSet<Var> = self.var_deps[lo.value()]
@@ -228,29 +314,38 @@ impl Bdd {
                 var_set.insert(var);
                 self.var_deps.push(var_set);
             }
+            log::trace!("newterm: {} as {:?}", new_term, node);
             #[cfg(feature = "adhoccounting")]
             {
-                log::debug!("newterm: {} as {:?}", new_term, node);
                 let mut count_cache = self.count_cache.borrow_mut();
-                let (lo_counts, lodepth) = *count_cache.get(&lo).expect("Cache corrupted");
-                let (hi_counts, hidepth) = *count_cache.get(&hi).expect("Cache corrupted");
+                let (lo_counts, lo_paths, lodepth) =
+                    *count_cache.get(&lo).expect("Cache corrupted");
+                let (hi_counts, hi_paths, hidepth) =
+                    *count_cache.get(&hi).expect("Cache corrupted");
                 log::debug!(
-                    "lo (cm: {}, mo: {}, dp: {})",
+                    "lo (cm: {}, mo: {}, p-: {}, p+: {}, dp: {})",
                     lo_counts.cmodels,
                     lo_counts.models,
+                    lo_paths.cmodels,
+                    lo_paths.models,
                     lodepth
                 );
                 log::debug!(
-                    "hi (cm: {}, mo: {}, dp: {})",
+                    "hi (cm: {}, mo: {}, p-: {}, p+: {}, dp: {})",
                     hi_counts.cmodels,
                     hi_counts.models,
+                    hi_paths.cmodels,
+                    hi_paths.models,
                     hidepth
                 );
+                #[cfg(feature = "adhoccountmodels")]
                 let (lo_exp, hi_exp) = if lodepth > hidepth {
                     (1, 2usize.pow((lodepth - hidepth) as u32))
                 } else {
                     (2usize.pow((hidepth - lodepth) as u32), 1)
                 };
+                #[cfg(not(feature = "adhoccountmodels"))]
+                let (lo_exp, hi_exp) = (0, 0);
                 log::debug!("lo_exp {}, hi_exp {}", lo_exp, hi_exp);
                 count_cache.insert(
                     new_term,
@@ -260,6 +355,11 @@ impl Bdd {
                         lo_counts.models * lo_exp + hi_counts.models * hi_exp,
                     )
                         .into(),
+                    (
+                        lo_paths.cmodels + hi_paths.cmodels,
+                        lo_paths.models + hi_paths.models,
+                    )
+                        .into(),
                     std::cmp::max(lodepth, hidepth) + 1,
                 ),
             );
@@ -270,15 +370,15 @@ impl Bdd {
         }
     }

-    /// Computes the number of counter-models and models for a given BDD-tree
+    /// Computes the number of counter-models and models for a given roBDD.
     ///
-    /// Use the flag `_memoization` to choose between using the memoization approach or not. (This flag does nothing if the feature `adhoccounting` is used)
+    /// Use the flag `_memoization` to choose between using the memoization approach or not. (This flag does nothing, if the feature `adhoccounting` is used)
     pub fn models(&self, term: Term, _memoization: bool) -> ModelCounts {
-        #[cfg(feature = "adhoccounting")]
+        #[cfg(feature = "adhoccountmodels")]
         {
             return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").0;
         }
-        #[cfg(not(feature = "adhoccounting"))]
+        #[cfg(not(feature = "adhoccountmodels"))]
         if _memoization {
             self.modelcount_memoization(term).0
         } else {
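`ModelCounts` is a (counter-models, models) pair, counted relative to the depth of the sub-diagram. For a conjunction of two variables the tests below expect `(3, 1)`; a sketch:

    let mut bdd = Bdd::new();
    let v1 = bdd.variable(Var(0));
    let v2 = bdd.variable(Var(1));
    let conj = bdd.and(v1, v2);
    // Over two variables: three falsifying assignments, one model.
    assert_eq!(bdd.models(conj, true), (3, 1).into());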
@@ -286,19 +386,58 @@ impl Bdd {
         }
     }

+    /// Computes the number of paths, which lead to ⊥ respectively ⊤.
+    ///
+    /// Use the flag `_memoization` to choose between using the memoization approach or not. (This flag does nothing, if the feature `adhoccounting` is used)
+    pub fn paths(&self, term: Term, _memoization: bool) -> ModelCounts {
+        #[cfg(feature = "adhoccounting")]
+        {
+            return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").1;
+        }
+        #[cfg(not(feature = "adhoccounting"))]
+        if _memoization {
+            self.modelcount_memoization(term).1
+        } else {
+            self.modelcount_naive(term).1
+        }
+    }
+
+    /// Computes the maximal depth of the given sub-diagram.
+    ///
+    /// Intuitively this will compute the longest possible path from **term** to a leaf-node (i.e., ⊥ or ⊤).
+    #[allow(dead_code)] // max depth may be used in future heuristics
+    pub fn max_depth(&self, term: Term) -> usize {
+        #[cfg(feature = "adhoccounting")]
+        {
+            return self.count_cache.borrow().get(&term).expect("The term should be originating from this bdd, otherwise the result would be inconsistent anyways").2;
+        }
+        #[cfg(not(feature = "adhoccounting"))]
+        match self.count_cache.borrow().get(&term) {
+            Some((_mc, _pc, depth)) => *depth,
+            None => {
+                if term.is_truth_value() {
+                    0
+                } else {
+                    self.max_depth(self.nodes[term.0].hi())
+                        .max(self.max_depth(self.nodes[term.0].lo()))
+                }
+            }
+        }
+    }
+
     #[allow(dead_code)] // dead code due to more efficient ad-hoc building, still used for a couple of tests
-    /// Computes the number of counter-models, models, and variables for a given BDD-tree
+    /// Computes the number of counter-models, models, and variables for a given roBDD
     fn modelcount_naive(&self, term: Term) -> CountNode {
         if term == Term::TOP {
-            (ModelCounts::top(), 0)
+            (ModelCounts::top(), ModelCounts::top(), 0)
         } else if term == Term::BOT {
-            (ModelCounts::bot(), 0)
+            (ModelCounts::bot(), ModelCounts::bot(), 0)
         } else {
             let node = &self.nodes[term.0];
             let mut lo_exp = 0u32;
             let mut hi_exp = 0u32;
-            let (lo_counts, lodepth) = self.modelcount_naive(node.lo());
-            let (hi_counts, hidepth) = self.modelcount_naive(node.hi());
+            let (lo_counts, lo_paths, lodepth) = self.modelcount_naive(node.lo());
+            let (hi_counts, hi_paths, hidepth) = self.modelcount_naive(node.hi());
             if lodepth > hidepth {
                 hi_exp = (lodepth - hidepth) as u32;
             } else {
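Unlike `models`, the new `paths` counts root-to-leaf paths without re-weighting subtrees of unequal depth, so the two can differ; `max_depth` is the longest such path. A sketch matching the expectations in the tests below:

    let mut bdd = Bdd::new();
    let v1 = bdd.variable(Var(0));
    let v2 = bdd.variable(Var(1));
    let conj = bdd.and(v1, v2);
    // Two paths end in ⊥ (v1=0; v1=1,v2=0) and one in ⊤.
    assert_eq!(bdd.paths(conj, true), (2, 1).into());
    assert_eq!(bdd.max_depth(conj), 2);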
@@ -310,6 +449,11 @@ impl Bdd {
                 lo_counts.models * 2usize.pow(lo_exp) + hi_counts.models * 2usize.pow(hi_exp),
             )
                 .into(),
+            (
+                lo_paths.cmodels + hi_paths.cmodels,
+                lo_paths.models + hi_paths.models,
+            )
+                .into(),
             std::cmp::max(lodepth, hidepth) + 1,
         )
     }
@@ -317,9 +461,9 @@ impl Bdd {

     fn modelcount_memoization(&self, term: Term) -> CountNode {
         if term == Term::TOP {
-            (ModelCounts::top(), 0)
+            (ModelCounts::top(), ModelCounts::top(), 0)
         } else if term == Term::BOT {
-            (ModelCounts::bot(), 0)
+            (ModelCounts::bot(), ModelCounts::bot(), 0)
         } else {
             if let Some(result) = self.count_cache.borrow().get(&term) {
                 return *result;
@@ -328,8 +472,8 @@ impl Bdd {
             let node = &self.nodes[term.0];
             let mut lo_exp = 0u32;
             let mut hi_exp = 0u32;
-            let (lo_counts, lodepth) = self.modelcount_memoization(node.lo());
-            let (hi_counts, hidepth) = self.modelcount_memoization(node.hi());
+            let (lo_counts, lo_paths, lodepth) = self.modelcount_memoization(node.lo());
+            let (hi_counts, hi_paths, hidepth) = self.modelcount_memoization(node.hi());
             if lodepth > hidepth {
                 hi_exp = (lodepth - hidepth) as u32;
             } else {
@@ -343,6 +487,11 @@ impl Bdd {
                     + hi_counts.models * 2usize.pow(hi_exp),
             )
                 .into(),
+            (
+                lo_paths.cmodels + hi_paths.cmodels,
+                lo_paths.models + hi_paths.models,
+            )
+                .into(),
             std::cmp::max(lodepth, hidepth) + 1,
         )
         };
@@ -351,17 +500,17 @@ impl Bdd {
         }
     }

-    /// repairs the internal structures after an import
+    /// Repairs the internal structures after an import.
     pub fn fix_import(&mut self) {
         self.generate_var_dependencies();
         #[cfg(feature = "adhoccounting")]
         {
             self.count_cache
                 .borrow_mut()
-                .insert(Term::TOP, (ModelCounts::top(), 0));
+                .insert(Term::TOP, (ModelCounts::top(), ModelCounts::top(), 0));
             self.count_cache
                 .borrow_mut()
-                .insert(Term::BOT, (ModelCounts::bot(), 0));
+                .insert(Term::BOT, (ModelCounts::bot(), ModelCounts::bot(), 0));
             for i in 0..self.nodes.len() {
                 log::debug!("fixing Term({})", i);
                 self.modelcount_memoization(Term(i));
@@ -385,6 +534,7 @@ impl Bdd {
         });
     }

+    /// Returns a [HashSet] of [variables][crate::datatypes::Var], which occur in a given roBDD.
     pub fn var_dependencies(&self, tree: Term) -> HashSet<Var> {
         #[cfg(feature = "variablelist")]
         {
@@ -405,6 +555,31 @@ impl Bdd {
             var_set
         }
     }
+
+    /// Returns the variable impact of a [variable][crate::datatypes::Var] with respect to a given set of roBDDs.
+    pub fn passive_var_impact(&self, var: Var, termlist: &[Term]) -> usize {
+        termlist.iter().fold(0usize, |acc, val| {
+            if self.var_dependencies(*val).contains(&var) {
+                acc + 1
+            } else {
+                acc
+            }
+        })
+    }
+
+    /// Counts how often another roBDD uses a [variable][crate::datatypes::Var], which occurs in this roBDD.
+    pub fn active_var_impact(&self, var: Var, termlist: &[Term]) -> usize {
+        (0..termlist.len()).fold(0usize, |acc, idx| {
+            if self
+                .var_dependencies(termlist[var.value()])
+                .contains(&Var(idx))
+            {
+                acc + 1
+            } else {
+                acc
+            }
+        })
+    }
 }

 #[cfg(test)]
@@ -515,7 +690,7 @@ mod test {
         let a1 = bdd.and(v1, v2);
         let _a2 = bdd.or(a1, v3);

-        assert_eq!(format!("{}", bdd), " \n0 BddNode: Var(18446744073709551614), lo: Term(0), hi: Term(0)\n1 BddNode: Var(18446744073709551615), lo: Term(1), hi: Term(1)\n2 BddNode: Var(0), lo: Term(0), hi: Term(1)\n3 BddNode: Var(1), lo: Term(0), hi: Term(1)\n4 BddNode: Var(2), lo: Term(0), hi: Term(1)\n5 BddNode: Var(0), lo: Term(0), hi: Term(3)\n6 BddNode: Var(1), lo: Term(4), hi: Term(1)\n7 BddNode: Var(0), lo: Term(4), hi: Term(6)\n");
+        assert_eq!(format!("{bdd}"), " \n0 BddNode: Var(18446744073709551614), lo: Term(0), hi: Term(0)\n1 BddNode: Var(18446744073709551615), lo: Term(1), hi: Term(1)\n2 BddNode: Var(0), lo: Term(0), hi: Term(1)\n3 BddNode: Var(1), lo: Term(0), hi: Term(1)\n4 BddNode: Var(2), lo: Term(0), hi: Term(1)\n5 BddNode: Var(0), lo: Term(0), hi: Term(3)\n6 BddNode: Var(1), lo: Term(4), hi: Term(1)\n7 BddNode: Var(0), lo: Term(4), hi: Term(6)\n");
     }

     #[test]
@@ -531,6 +706,7 @@ mod test {
         let formula3 = bdd.xor(v1, v2);
         let formula4 = bdd.and(v3, formula2);

+        #[cfg(feature = "adhoccountmodels")]
         assert_eq!(bdd.models(v1, false), (1, 1).into());
         let mut x = bdd.count_cache.get_mut().iter().collect::<Vec<_>>();
         x.sort();
@@ -539,55 +715,99 @@ mod test {
             log::debug!("{:?}", x);
         }
         log::debug!("{:?}", x);
-        assert_eq!(bdd.models(formula1, false), (3, 1).into());
-        assert_eq!(bdd.models(formula2, false), (1, 3).into());
-        assert_eq!(bdd.models(formula3, false), (2, 2).into());
-        assert_eq!(bdd.models(formula4, false), (5, 3).into());
-        assert_eq!(bdd.models(Term::TOP, false), (0, 1).into());
-        assert_eq!(bdd.models(Term::BOT, false), (1, 0).into());
-        assert_eq!(bdd.models(v1, true), (1, 1).into());
-        assert_eq!(bdd.models(formula1, true), (3, 1).into());
-        assert_eq!(bdd.models(formula2, true), (1, 3).into());
-        assert_eq!(bdd.models(formula3, true), (2, 2).into());
-        assert_eq!(bdd.models(formula4, true), (5, 3).into());
-        assert_eq!(bdd.models(Term::TOP, true), (0, 1).into());
-        assert_eq!(bdd.models(Term::BOT, true), (1, 0).into());
-        assert_eq!(bdd.modelcount_naive(v1), ((1, 1).into(), 1));
-        assert_eq!(bdd.modelcount_naive(formula1), ((3, 1).into(), 2));
-        assert_eq!(bdd.modelcount_naive(formula2), ((1, 3).into(), 2));
-        assert_eq!(bdd.modelcount_naive(formula3), ((2, 2).into(), 2));
-        assert_eq!(bdd.modelcount_naive(formula4), ((5, 3).into(), 3));
-        assert_eq!(bdd.modelcount_naive(Term::TOP), ((0, 1).into(), 0));
-        assert_eq!(bdd.modelcount_naive(Term::BOT), ((1, 0).into(), 0));
-        assert_eq!(
-            bdd.modelcount_naive(formula4),
-            bdd.modelcount_memoization(formula4)
-        );
-        assert_eq!(bdd.modelcount_naive(v1), bdd.modelcount_memoization(v1));
-        assert_eq!(
-            bdd.modelcount_naive(formula1),
-            bdd.modelcount_memoization(formula1)
-        );
-        assert_eq!(
-            bdd.modelcount_naive(formula2),
-            bdd.modelcount_memoization(formula2)
-        );
-        assert_eq!(
-            bdd.modelcount_naive(formula3),
-            bdd.modelcount_memoization(formula3)
-        );
-        assert_eq!(
-            bdd.modelcount_naive(Term::TOP),
-            bdd.modelcount_memoization(Term::TOP)
-        );
-        assert_eq!(
-            bdd.modelcount_naive(Term::BOT),
-            bdd.modelcount_memoization(Term::BOT)
-        );
+        #[cfg(feature = "adhoccountmodels")]
+        {
+            assert_eq!(bdd.models(formula1, false), (3, 1).into());
+            assert_eq!(bdd.models(formula2, false), (1, 3).into());
+            assert_eq!(bdd.models(formula3, false), (2, 2).into());
+            assert_eq!(bdd.models(formula4, false), (5, 3).into());
+            assert_eq!(bdd.models(Term::TOP, false), (0, 1).into());
+            assert_eq!(bdd.models(Term::BOT, false), (1, 0).into());
+
+            assert_eq!(bdd.models(v1, true), (1, 1).into());
+            assert_eq!(bdd.models(formula1, true), (3, 1).into());
+            assert_eq!(bdd.models(formula2, true), (1, 3).into());
+            assert_eq!(bdd.models(formula3, true), (2, 2).into());
+            assert_eq!(bdd.models(formula4, true), (5, 3).into());
+            assert_eq!(bdd.models(Term::TOP, true), (0, 1).into());
+            assert_eq!(bdd.models(Term::BOT, true), (1, 0).into());
+        }
+
+        assert_eq!(bdd.paths(formula1, false), (2, 1).into());
+        assert_eq!(bdd.paths(formula2, false), (1, 2).into());
+        assert_eq!(bdd.paths(formula3, false), (2, 2).into());
+        assert_eq!(bdd.paths(formula4, false), (3, 2).into());
+        assert_eq!(bdd.paths(Term::TOP, false), (0, 1).into());
+        assert_eq!(bdd.paths(Term::BOT, false), (1, 0).into());
+
+        assert_eq!(bdd.paths(v1, true), (1, 1).into());
+        assert_eq!(bdd.paths(formula1, true), (2, 1).into());
+        assert_eq!(bdd.paths(formula2, true), (1, 2).into());
+        assert_eq!(bdd.paths(formula3, true), (2, 2).into());
+        assert_eq!(bdd.paths(formula4, true), (3, 2).into());
+        assert_eq!(bdd.paths(Term::TOP, true), (0, 1).into());
+        assert_eq!(bdd.paths(Term::BOT, true), (1, 0).into());
+
+        assert_eq!(bdd.modelcount_naive(v1), ((1, 1).into(), (1, 1).into(), 1));
+        assert_eq!(
+            bdd.modelcount_naive(formula1),
+            ((3, 1).into(), (2, 1).into(), 2)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(formula2),
+            ((1, 3).into(), (1, 2).into(), 2)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(formula3),
+            ((2, 2).into(), (2, 2).into(), 2)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(formula4),
+            ((5, 3).into(), (3, 2).into(), 3)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(Term::TOP),
+            ((0, 1).into(), (0, 1).into(), 0)
+        );
+        assert_eq!(
+            bdd.modelcount_naive(Term::BOT),
+            ((1, 0).into(), (1, 0).into(), 0)
+        );
+
+        #[cfg(feature = "adhoccountmodels")]
+        {
+            assert_eq!(
+                bdd.modelcount_naive(formula4),
+                bdd.modelcount_memoization(formula4)
+            );
+
+            assert_eq!(bdd.modelcount_naive(v1), bdd.modelcount_memoization(v1));
+            assert_eq!(
+                bdd.modelcount_naive(formula1),
+                bdd.modelcount_memoization(formula1)
+            );
+            assert_eq!(
+                bdd.modelcount_naive(formula2),
+                bdd.modelcount_memoization(formula2)
+            );
+            assert_eq!(
+                bdd.modelcount_naive(formula3),
+                bdd.modelcount_memoization(formula3)
+            );
+            assert_eq!(
+                bdd.modelcount_naive(Term::TOP),
+                bdd.modelcount_memoization(Term::TOP)
+            );
+            assert_eq!(
+                bdd.modelcount_naive(Term::BOT),
+                bdd.modelcount_memoization(Term::BOT)
+            );
+        }
+
+        assert_eq!(bdd.max_depth(Term::BOT), 0);
+        assert_eq!(bdd.max_depth(v1), 1);
+        assert_eq!(bdd.max_depth(formula3), 2);
+        assert_eq!(bdd.max_depth(formula4), 3);
     }

 #[cfg(feature = "variablelist")]
@@ -617,6 +837,39 @@ mod test {
             .for_each(|(left, right)| {
                 assert!(left == right);
             });
+
+        assert_eq!(
+            bdd.passive_var_impact(Var(0), &[formula1, formula2, formula3, formula4]),
+            4
+        );
+        assert_eq!(
+            bdd.passive_var_impact(Var(2), &[formula1, formula2, formula3, formula4]),
+            1
+        );
+        assert_eq!(
+            bdd.passive_var_impact(Var(2), &[formula1, formula2, formula3]),
+            0
+        );
+    }
+
+    #[test]
+    fn var_impact() {
+        let mut bdd = Bdd::new();
+        let v1 = bdd.variable(Var(0));
+        let v2 = bdd.variable(Var(1));
+        let v3 = bdd.variable(Var(2));
+
+        let formula1 = bdd.and(v1, v2);
+        let formula2 = bdd.or(v1, v2);
+
+        let ac: Vec<Term> = vec![formula1, formula2, v3];
+
+        assert_eq!(bdd.passive_var_impact(Var(0), &ac), 2);
+        assert_eq!(bdd.passive_var_impact(Var(1), &ac), 2);
+        assert_eq!(bdd.passive_var_impact(Var(2), &ac), 1);
+
+        assert_eq!(bdd.active_var_impact(Var(0), &ac), 2);
+        assert_eq!(bdd.active_var_impact(Var(2), &ac), 1);
     }

     #[test]
267 lib/src/obdd/frontend.rs Normal file
@@ -0,0 +1,267 @@
//! Implementation of frontend-feature related methods and functions
//! See the Structs in the [obdd-module][super] for most of the implementations

use crate::datatypes::Term;

use super::BddNode;

impl super::Bdd {
    /// Instantiate a new [roBDD][super::Bdd] structure.
    /// Constants for the [`⊤`][crate::datatypes::Term::TOP] and [`⊥`][crate::datatypes::Term::BOT] concepts are prepared in that step too.
    /// # Attention
    /// Constants for [`⊤`][crate::datatypes::Term::TOP] and [`⊥`][crate::datatypes::Term::BOT] concepts are not sent, as they are considered to be existing in every [Bdd][super::Bdd] structure.
    pub fn with_sender(sender: crossbeam_channel::Sender<BddNode>) -> Self {
        // TODO nicer handling of the initialisation though overhead is not an issue here
        let mut result = Self::new();
        result.set_sender(sender);
        result
    }

    /// Instantiate a new [roBDD][super::Bdd] structure.
    /// Constants for the [`⊤`][crate::datatypes::Term::TOP] and [`⊥`][crate::datatypes::Term::BOT] concepts are prepared in that step too.
    /// # Attention
    /// Note that mixing manipulating operations and utilising the communication channel for a receiving [roBDD][super::Bdd] may end up in inconsistent data.
    /// So far, only manipulate the [roBDD][super::Bdd] if no further [recv][Self::recv] will be called.
    pub fn with_receiver(receiver: crossbeam_channel::Receiver<BddNode>) -> Self {
        // TODO nicer handling of the initialisation though overhead is not an issue here
        let mut result = Self::new();
        result.set_receiver(receiver);
        result
    }

    /// Instantiate a new [roBDD][super::Bdd] structure.
    /// Constants for the [`⊤`][crate::datatypes::Term::TOP] and [`⊥`][crate::datatypes::Term::BOT] concepts are prepared in that step too.
    /// # Attention
    /// - Constants for [`⊤`][crate::datatypes::Term::TOP] and [`⊥`][crate::datatypes::Term::BOT] concepts are not sent, as they are considered to be existing in every [Bdd][super::Bdd] structure.
    /// - Mixing manipulating operations and utilising the communication channel for a receiving [roBDD][super::Bdd] may end up in inconsistent data.
    ///
    /// So far, only manipulate the [roBDD][super::Bdd] if no further [recv][Self::recv] will be called.
    pub fn with_sender_receiver(
        sender: crossbeam_channel::Sender<BddNode>,
        receiver: crossbeam_channel::Receiver<BddNode>,
    ) -> Self {
        let mut result = Self::new();
        result.set_receiver(receiver);
        result.set_sender(sender);
        result
    }

    /// Updates the currently used [sender][crossbeam_channel::Sender]
    pub fn set_sender(&mut self, sender: crossbeam_channel::Sender<BddNode>) {
        self.sender = Some(sender);
    }

    /// Updates the currently used [receiver][crossbeam_channel::Receiver]
    pub fn set_receiver(&mut self, receiver: crossbeam_channel::Receiver<BddNode>) {
        self.receiver = Some(receiver);
    }

    /// Receives all information till the looked for [`Term`][crate::datatypes::Term] is either found or all data is read.
    /// Note that the values are read, consumed, and added to the [Bdd][super::Bdd].
    /// # Returns
    /// - [`true`] if the [term][crate::datatypes::Term] is found (either in the [Bdd][super::Bdd] or in the channel.
    /// - [`false`] if neither the [Bdd][super::Bdd] nor the channel contains the [term][crate::datatypes::Term].
    pub fn recv(&mut self, term: Term) -> bool {
        if term.value() < self.nodes.len() {
            true
        } else if let Some(recv) = &self.receiver {
            loop {
                match recv.try_recv() {
                    Ok(node) => {
                        let new_term = Term(self.nodes.len());
                        self.nodes.push(node);
                        self.cache.insert(node, new_term);
                        if let Some(send) = &self.sender {
                            match send.send(node) {
                                Ok(_) => log::trace!("Sent {node} to the channel."),
                                Err(e) => {
                                    log::error!(
                                        "Error {e} occurred when sending {node} to {:?}",
                                        send
                                    )
                                }
                            }
                        }
                        if new_term == term {
                            return true;
                        }
                    }
                    Err(_) => return false,
                }
            }
        } else {
            false
        }
    }
}

#[cfg(test)]
mod test {
    use super::super::*;

    #[test]
    fn get_bdd_updates() {
        let (send, recv) = crossbeam_channel::unbounded();
        let mut bdd = Bdd::with_sender(send);

        let solving = std::thread::spawn(move || {
            let v1 = bdd.variable(Var(0));
            let v2 = bdd.variable(Var(1));

            assert_eq!(v1, Term(2));
            assert_eq!(v2, Term(3));

            let t1 = bdd.and(v1, v2);
            let nt1 = bdd.not(t1);
            let ft = bdd.or(v1, nt1);

            assert_eq!(ft, Term::TOP);

            let v3 = bdd.variable(Var(2));
            let nv3 = bdd.not(v3);
            assert_eq!(bdd.and(v3, nv3), Term::BOT);

            let conj = bdd.and(v1, v2);
            assert_eq!(bdd.restrict(conj, Var(0), false), Term::BOT);
            assert_eq!(bdd.restrict(conj, Var(0), true), v2);

            let a = bdd.and(v3, v2);
            let b = bdd.or(v2, v1);

            let con1 = bdd.and(a, conj);

            let end = bdd.or(con1, b);
            log::debug!("Restrict test: restrict({},{},false)", end, Var(1));
            let x = bdd.restrict(end, Var(1), false);
            assert_eq!(x, Term(2));
        });

        let updates: Vec<BddNode> = recv.iter().collect();
        assert_eq!(
            updates,
            vec![
                BddNode::new(Var(0), Term(0), Term(1)),
                BddNode::new(Var(1), Term(0), Term(1)),
                BddNode::new(Var(0), Term(0), Term(3)),
                BddNode::new(Var(1), Term(1), Term(0)),
                BddNode::new(Var(0), Term(1), Term(5)),
                BddNode::new(Var(2), Term(0), Term(1)),
                BddNode::new(Var(2), Term(1), Term(0)),
                BddNode::new(Var(1), Term(0), Term(7)),
                BddNode::new(Var(0), Term(3), Term(1)),
                BddNode::new(Var(0), Term(0), Term(9)),
            ]
        );
        solving.join().expect("Both threads should terminate");
    }

    #[test]
    fn recv_send() {
        let (send1, recv1) = crossbeam_channel::unbounded();
        let (send2, recv2) = crossbeam_channel::unbounded();
        let mut bdd1 = Bdd::with_sender(send1);
        let mut bddm = Bdd::with_sender_receiver(send2, recv1);
        let mut bddl = Bdd::with_receiver(recv2);

        let solving = std::thread::spawn(move || {
            let v1 = bdd1.variable(Var(0));
            let v2 = bdd1.variable(Var(1));

            assert_eq!(v1, Term(2));
            assert_eq!(v2, Term(3));

            let t1 = bdd1.and(v1, v2);
            let nt1 = bdd1.not(t1);
            let ft = bdd1.or(v1, nt1);

            assert_eq!(ft, Term::TOP);

            let v3 = bdd1.variable(Var(2));
            let nv3 = bdd1.not(v3);
            assert_eq!(bdd1.and(v3, nv3), Term::BOT);

            let conj = bdd1.and(v1, v2);
            assert_eq!(bdd1.restrict(conj, Var(0), false), Term::BOT);
            assert_eq!(bdd1.restrict(conj, Var(0), true), v2);

            let a = bdd1.and(v3, v2);
            let b = bdd1.or(v2, v1);

            let con1 = bdd1.and(a, conj);

            let end = bdd1.or(con1, b);
            log::debug!("Restrict test: restrict({},{},false)", end, Var(1));
            let x = bdd1.restrict(end, Var(1), false);
            assert_eq!(x, Term(2));
        });
        // allow the worker to fill the channels
        std::thread::sleep(std::time::Duration::from_millis(10));
        // both are initialised, no updates so far
        assert_eq!(bddm.nodes, bddl.nodes);
        // receiving a truth constant should work without changing the bdd
        assert!(bddm.recv(Term::TOP));
        assert_eq!(bddm.nodes, bddl.nodes);
        // receiving some element works for middle -> last, but not last -> middle
        assert!(bddm.recv(Term(2)));
        assert!(bddl.recv(Term(2)));
        assert_eq!(bddl.nodes.len(), 3);
        assert!(!bddl.recv(Term(5)));
        // get all elements into middle bdd1
        assert!(!bddm.recv(Term(usize::MAX)));

        assert_eq!(
            bddm.nodes,
            vec![
                BddNode::bot_node(),
                BddNode::top_node(),
                BddNode::new(Var(0), Term(0), Term(1)),
                BddNode::new(Var(1), Term(0), Term(1)),
                BddNode::new(Var(0), Term(0), Term(3)),
                BddNode::new(Var(1), Term(1), Term(0)),
                BddNode::new(Var(0), Term(1), Term(5)),
                BddNode::new(Var(2), Term(0), Term(1)),
                BddNode::new(Var(2), Term(1), Term(0)),
                BddNode::new(Var(1), Term(0), Term(7)),
                BddNode::new(Var(0), Term(3), Term(1)),
                BddNode::new(Var(0), Term(0), Term(9)),
            ]
        );

        // last bdd is still in the previous state
        assert_eq!(
            bddl.nodes,
            vec![
                BddNode::bot_node(),
                BddNode::top_node(),
                BddNode::new(Var(0), Term(0), Term(1)),
            ]
        );

        // and now catch up till 10
        assert!(bddl.recv(Term(10)));

        assert_eq!(
            bddl.nodes,
            vec![
                BddNode::bot_node(),
                BddNode::top_node(),
                BddNode::new(Var(0), Term(0), Term(1)),
                BddNode::new(Var(1), Term(0), Term(1)),
                BddNode::new(Var(0), Term(0), Term(3)),
                BddNode::new(Var(1), Term(1), Term(0)),
                BddNode::new(Var(0), Term(1), Term(5)),
                BddNode::new(Var(2), Term(0), Term(1)),
                BddNode::new(Var(2), Term(1), Term(0)),
                BddNode::new(Var(1), Term(0), Term(7)),
                BddNode::new(Var(0), Term(3), Term(1)),
            ]
        );

        solving.join().expect("Both threads should terminate");

        // asking for 10 again works too
        assert!(bddl.recv(Term(10)));
        // fully catch up with the last bdd
        assert!(bddl.recv(Term(11)));
        assert_eq!(bddl.nodes, bddm.nodes);
    }
}
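A minimal sketch of wiring a producing and a consuming diagram together through this channel API, condensed from the `recv_send` test above:

    let (send, recv) = crossbeam_channel::unbounded();
    let mut producer = Bdd::with_sender(send);
    let mut mirror = Bdd::with_receiver(recv);

    let v1 = producer.variable(Var(0));
    let v2 = producer.variable(Var(1));
    producer.and(v1, v2); // becomes Term(4) on the producer side

    // Drain the channel until the node behind Term(4) has arrived.
    assert!(mirror.recv(Term(4)));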
@@ -1,8 +1,8 @@
-//! vectorize maps with non-standard keys
+//! Vectorize maps with non-standard keys.
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use std::iter::FromIterator;

-/// Serialise into a Vector from a Map
+/// Serialize into a [Vector][std::vec::Vec] from a [Map][std::collections::HashMap].
 pub fn serialize<'a, T, K, V, S>(target: T, ser: S) -> Result<S::Ok, S::Error>
 where
     S: Serializer,
@@ -11,10 +11,10 @@ where
     V: Serialize + 'a,
 {
     let container: Vec<_> = target.into_iter().collect();
-    serde::Serialize::serialize(&container, ser)
+    Serialize::serialize(&container, ser)
 }

-/// Deserialize from a Vector to a Map
+/// Deserialize from a [Vector][std::vec::Vec] to a [Map][std::collections::HashMap].
 pub fn deserialize<'de, T, K, V, D>(des: D) -> Result<T, D::Error>
 where
     D: Deserializer<'de>,
@@ -22,6 +22,6 @@ where
     K: Deserialize<'de>,
     V: Deserialize<'de>,
 {
-    let container: Vec<_> = serde::Deserialize::deserialize(des)?;
-    Ok(T::from_iter(container.into_iter()))
+    let container: Vec<_> = Deserialize::deserialize(des)?;
+    Ok(T::from_iter(container))
 }
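These two free functions have exactly the signatures serde's `with` attribute expects, which is how a map with a non-string key can stay serializable. A sketch (the struct and field here are illustrative, not taken from the diff):

    #[derive(Serialize, Deserialize)]
    struct Example {
        // stored on disk as a Vec of (key, value) pairs
        #[serde(with = "vectorize")]
        cache: HashMap<BddNode, Term>,
    }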
@@ -1,5 +1,5 @@
-//! A Parser for ADFs with all needed helper-methods.
-//! It utilises the [nom-crate](https://crates.io/crates/nom)
+//! Parser for ADFs with all needed helper-methods.
+//! It utilises the [nom-crate](https://crates.io/crates/nom).
 use lexical_sort::{natural_lexical_cmp, StringSort};
 use nom::{
     branch::alt,
@@ -10,32 +10,38 @@ use nom::{
     sequence::{delimited, preceded, separated_pair, terminated},
     IResult,
 };
-use std::{cell::RefCell, collections::HashMap, rc::Rc};
+use std::collections::HashMap;
+use std::{
+    cell::RefCell,
+    sync::{Arc, RwLock},
+};

-/// A representation of a formula, still using the strings from the input
+use crate::datatypes::adf::VarContainer;
+
+/// A representation of a formula, still using the strings from the input.
 #[derive(Clone, PartialEq, Eq)]
-pub enum Formula<'a> {
-    /// c(v) in the input format
+pub enum Formula {
+    /// `c(f)` in the input format.
     Bot,
-    /// c(f) in the input format
+    /// `c(v)` in the input format.
     Top,
-    /// Some atomic variable in the input format
-    Atom(&'a str),
+    /// Some atomic variable in the input format.
+    Atom(String),
-    /// Negation of a subformula
-    Not(Box<Formula<'a>>),
+    /// Negation of a subformula.
+    Not(Box<Formula>),
-    /// Conjunction of two subformulae
-    And(Box<Formula<'a>>, Box<Formula<'a>>),
+    /// Conjunction of two subformulae.
+    And(Box<Formula>, Box<Formula>),
-    /// Disjunction of two subformulae
-    Or(Box<Formula<'a>>, Box<Formula<'a>>),
+    /// Disjunction of two subformulae.
+    Or(Box<Formula>, Box<Formula>),
-    /// Implication of two subformulae
-    Imp(Box<Formula<'a>>, Box<Formula<'a>>),
+    /// Implication of two subformulae.
+    Imp(Box<Formula>, Box<Formula>),
-    /// Exclusive-Or of two subformulae
-    Xor(Box<Formula<'a>>, Box<Formula<'a>>),
+    /// Exclusive-Or of two subformulae.
+    Xor(Box<Formula>, Box<Formula>),
-    /// If and only if connective between two formulae
-    Iff(Box<Formula<'a>>, Box<Formula<'a>>),
+    /// If and only if connective between two formulae.
+    Iff(Box<Formula>, Box<Formula>),
 }

-impl Formula<'_> {
+impl Formula {
     pub(crate) fn to_boolean_expr(
         &self,
     ) -> biodivine_lib_bdd::boolean_expression::BooleanExpression {
@@ -84,29 +90,29 @@ impl Formula<'_> {
     }
 }

-impl std::fmt::Debug for Formula<'_> {
+impl std::fmt::Debug for Formula {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
             Formula::Atom(a) => {
-                write!(f, "{}", a)?;
+                write!(f, "{a}")?;
             }
             Formula::Not(n) => {
-                write!(f, "not({:?})", n)?;
+                write!(f, "not({n:?})")?;
             }
             Formula::And(f1, f2) => {
-                write!(f, "and({:?},{:?})", f1, f2)?;
+                write!(f, "and({f1:?},{f2:?})")?;
             }
             Formula::Or(f1, f2) => {
-                write!(f, "or({:?},{:?})", f1, f2)?;
+                write!(f, "or({f1:?},{f2:?})")?;
             }
             Formula::Imp(f1, f2) => {
-                write!(f, "imp({:?},{:?})", f1, f2)?;
+                write!(f, "imp({f1:?},{f2:?})")?;
             }
             Formula::Xor(f1, f2) => {
-                write!(f, "xor({:?},{:?})", f1, f2)?;
+                write!(f, "xor({f1:?},{f2:?})")?;
             }
             Formula::Iff(f1, f2) => {
-                write!(f, "iff({:?},{:?})", f1, f2)?;
+                write!(f, "iff({f1:?},{f2:?})")?;
             }
             Formula::Bot => {
                 write!(f, "Const(B)")?;
@@ -122,32 +128,33 @@ impl std::fmt::Debug for Formula<'_> {
 /// A parse structure to hold all the information given by the input file in one place.
 ///
 /// Due to an internal representation with [RefCell][std::cell::RefCell] and [Rc][std::rc::Rc] the values can be
-/// handed over to other structures without further storage needs.
+/// handed over to other structures without further memory needs.
 ///
 /// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead.
 #[derive(Debug)]
-pub struct AdfParser<'a> {
-    namelist: Rc<RefCell<Vec<String>>>,
-    dict: Rc<RefCell<HashMap<String, usize>>>,
-    formulae: RefCell<Vec<Formula<'a>>>,
-    formulaname: RefCell<Vec<String>>,
+pub struct AdfParser {
+    /// A name for each statement (identified by index in vector)
+    pub namelist: Arc<RwLock<Vec<String>>>,
+    /// Inverse mapping from name to index of statement in vector above
+    pub dict: Arc<RwLock<HashMap<String, usize>>>,
+    /// The formula (acceptance condition) for each statement identified by its index
+    pub formulae: RefCell<Vec<Formula>>,
+    /// The formula for each statement identified by its index
+    pub formulaname: RefCell<Vec<String>>,
 }

-impl Default for AdfParser<'_> {
+impl Default for AdfParser {
     fn default() -> Self {
         AdfParser {
-            namelist: Rc::new(RefCell::new(Vec::new())),
-            dict: Rc::new(RefCell::new(HashMap::new())),
+            namelist: Arc::new(RwLock::new(Vec::new())),
+            dict: Arc::new(RwLock::new(HashMap::new())),
             formulae: RefCell::new(Vec::new()),
             formulaname: RefCell::new(Vec::new()),
         }
     }
 }

-impl<'a, 'b> AdfParser<'b>
-where
-    'a: 'b,
-{
+impl<'a> AdfParser {
     #[allow(dead_code)]
     fn parse_statements(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> {
         move |input| {
@@ -157,7 +164,7 @@ where
     }

     /// Parses a full input file and creates internal structures.
-    /// Note that this method returns a closure (see the following Example for the correct usage).
+    /// Note that this method returns a closure (see the following example for the correct usage).
     /// # Example
     /// ```
     /// let parser = adf_bdd::parser::AdfParser::default();
@@ -176,8 +183,14 @@ where

     fn parse_statement(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> {
         |input| {
-            let mut dict = self.dict.borrow_mut();
-            let mut namelist = self.namelist.borrow_mut();
+            let mut dict = self
+                .dict
+                .write()
+                .expect("RwLock of dict could not get write access");
+            let mut namelist = self
+                .namelist
+                .write()
+                .expect("RwLock of namelist could not get write access");
             let (remain, statement) =
                 terminated(AdfParser::statement, terminated(tag("."), multispace0))(input)?;
             if !dict.contains_key(statement) {
@@ -200,33 +213,53 @@ where
     }
 }

-impl AdfParser<'_> {
+impl AdfParser {
+    /// Creates a new parser, utilising the already existing [VarContainer]
+    pub fn with_var_container(var_container: VarContainer) -> AdfParser {
+        AdfParser {
+            namelist: var_container.names(),
+            dict: var_container.mappings(),
+            formulae: RefCell::new(Vec::new()),
+            formulaname: RefCell::new(Vec::new()),
+        }
+    }
+}
+
+impl AdfParser {
     /// after an update to the namelist, all indizes are updated
     fn regenerate_indizes(&self) {
         self.namelist
-            .as_ref()
-            .borrow()
+            .read()
+            .expect("ReadLock on namelist failed")
             .iter()
             .enumerate()
             .for_each(|(i, elem)| {
-                self.dict.as_ref().borrow_mut().insert(elem.clone(), i);
+                self.dict
+                    .write()
+                    .expect("WriteLock on dict failed")
+                    .insert(elem.clone(), i);
             });
     }

     /// Sort the variables in lexicographical order.
-    /// Results which got used before might become corrupted.
+    /// Results, which got used before might become corrupted.
+    /// Ensure that all used data is physically copied.
     pub fn varsort_lexi(&self) -> &Self {
-        self.namelist.as_ref().borrow_mut().sort_unstable();
+        self.namelist
+            .write()
+            .expect("WriteLock on namelist failed")
+            .sort_unstable();
         self.regenerate_indizes();
         self
     }

     /// Sort the variables in alphanumerical order.
-    /// Results which got used before might become corrupted.
+    /// Results, which got used before might become corrupted.
+    /// Ensure that all used data is physically copied.
     pub fn varsort_alphanum(&self) -> &Self {
         self.namelist
-            .as_ref()
-            .borrow_mut()
+            .write()
+            .expect("WriteLock on namelist failed")
             .string_sort_unstable(natural_lexical_cmp);
         self.regenerate_indizes();
         self
@@ -252,7 +285,7 @@ impl AdfParser<'_> {
     }

     fn atomic_term(input: &str) -> IResult<&str, Formula> {
-        AdfParser::atomic(input).map(|(input, result)| (input, Formula::Atom(result)))
+        AdfParser::atomic(input).map(|(input, result)| (input, Formula::Atom(result.to_string())))
     }

     fn formula(input: &str) -> IResult<&str, Formula> {
@@ -338,28 +371,41 @@ impl AdfParser<'_> {
         ))(input)
     }

-    /// Allows insight of the number of parsed Statements
+    /// Allows insight of the number of parsed statements.
     pub fn dict_size(&self) -> usize {
         //self.dict.borrow().len()
-        self.dict.as_ref().borrow().len()
+        self.dict.read().expect("ReadLock on dict failed").len()
     }

-    /// Returns the number-representation and position of a given variable/statement in string-representation
+    /// Returns the number-representation and position of a given statement in string-representation.
+    ///
+    /// Will return [None] if the string does not occur in the dictionary.
     pub fn dict_value(&self, value: &str) -> Option<usize> {
-        self.dict.as_ref().borrow().get(value).copied()
+        self.dict
+            .read()
+            .expect("ReadLock on dict failed")
+            .get(value)
+            .copied()
     }

-    /// Returns the acceptance condition of a statement at the given positon
+    /// Returns the acceptance condition of a statement at the given position.
+    ///
+    /// Will return [None] if the position does not map to a formula.
     pub fn ac_at(&self, idx: usize) -> Option<Formula> {
         self.formulae.borrow().get(idx).cloned()
     }

-    pub(crate) fn dict_rc_refcell(&self) -> Rc<RefCell<HashMap<String, usize>>> {
-        Rc::clone(&self.dict)
+    pub(crate) fn dict(&self) -> Arc<RwLock<HashMap<String, usize>>> {
+        Arc::clone(&self.dict)
     }

-    pub(crate) fn namelist_rc_refcell(&self) -> Rc<RefCell<Vec<String>>> {
-        Rc::clone(&self.namelist)
+    pub(crate) fn namelist(&self) -> Arc<RwLock<Vec<String>>> {
+        Arc::clone(&self.namelist)
+    }
+
+    /// Returns a [`VarContainer`][crate::datatypes::adf::VarContainer] which allows to access the variable information gathered by the parser
+    pub fn var_container(&self) -> VarContainer {
+        VarContainer::from_parser(self.namelist(), self.dict())
     }

     pub(crate) fn formula_count(&self) -> usize {
@@ -373,8 +419,8 @@ impl AdfParser<'_> {
             .map(|name| {
                 *self
                     .dict
-                    .as_ref()
-                    .borrow()
+                    .read()
+                    .expect("ReadLock on dict failed")
                     .get(name)
                     .expect("Dictionary should contain all the used formulanames")
             })
@@ -435,7 +481,7 @@ mod test {
         let (_remain, result) = AdfParser::formula(input).unwrap();

         assert_eq!(
-            format!("{:?}", result),
+            format!("{result:?}"),
             "and(or(not(a),iff( iff left ,b)),xor(imp(c,d),e))"
         );

@@ -459,7 +505,10 @@ mod test {
         assert_eq!(parser.dict_value("b"), Some(2usize));
         assert_eq!(
             format!("{:?}", parser.ac_at(1).unwrap()),
-            format!("{:?}", Formula::Not(Box::new(Formula::Atom("a"))))
+            format!(
+                "{:?}",
+                Formula::Not(Box::new(Formula::Atom("a".to_string())))
+            )
         );
         assert_eq!(parser.formula_count(), 3);
         assert_eq!(parser.formula_order(), vec![0, 2, 1]);
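
The point of swapping `Rc<RefCell<...>>` for `Arc<RwLock<...>>` in this diff is that `Rc` and `RefCell` are not `Send`/`Sync`, so the parser's shared tables could never cross a thread boundary, while the web server added later in this changeset hands parser data to blocking worker threads. A minimal standalone sketch of the resulting sharing pattern (not code from this repository):

    use std::sync::{Arc, RwLock};
    use std::thread;

    fn main() {
        // Shared, thread-safe statement list, mirroring the new AdfParser fields.
        let namelist: Arc<RwLock<Vec<String>>> = Arc::new(RwLock::new(vec!["a".into()]));

        let worker = {
            let namelist = Arc::clone(&namelist);
            thread::spawn(move || {
                // Writers take the lock exclusively, as parse_statement now does.
                namelist.write().expect("lock poisoned").push("b".into());
            })
        };

        worker.join().unwrap();
        // Readers can then share access, as regenerate_indizes now does.
        assert_eq!(namelist.read().expect("lock poisoned").len(), 2);
    }
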
@@ -1,5 +1,5 @@
 #[test]
-fn {name}() {{
+fn {name}_biodivine() {{
     let resource = "{path}";
     log::debug!("resource: {{}}", resource);
     let grounded = "{grounded}";
@@ -18,3 +18,23 @@ fn {name}() {{
     );
 }}
+
+#[test]
+fn {name}_naive() {{
+    let resource = "{path}";
+    log::debug!("resource: {{}}", resource);
+    let grounded = "{grounded}";
+    log::debug!("Grounded: {{}}", grounded);
+    let parser = AdfParser::default();
+    let expected_result = std::fs::read_to_string(grounded);
+    assert!(expected_result.is_ok());
+    let input = std::fs::read_to_string(resource).unwrap();
+    parser.parse()(&input).unwrap();
+    parser.varsort_alphanum();
+    let mut adf = adf_bdd::adf::Adf::from_parser(&parser);
+    let grounded = adf.grounded();
+    assert_eq!(
+        format!("{{}}", adf.print_interpretation(&grounded)),
+        format!("{{}}\n", expected_result.unwrap())
+    );
+}}
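
The `{name}`, `{path}` and `{grounded}` placeholders (with doubled braces escaping literal ones) indicate that this file is a test template instantiated once per resource file, presumably from a build script. A hypothetical sketch of such an instantiation step; the template path, the `res/` layout and the file extensions are all assumptions, not taken from the diff:

    // Hypothetical build.rs sketch: fill the test template once per resource file.
    use std::{env, fs, path::Path};

    const TEMPLATE: &str = include_str!("tests/test_template.rs.in"); // assumed location

    fn main() {
        let out = Path::new(&env::var("OUT_DIR").unwrap()).join("generated_tests.rs");
        let mut code = String::new();
        for entry in fs::read_dir("res").unwrap() {
            let path = entry.unwrap().path();
            if path.extension().map_or(false, |e| e == "adf") {
                let name = path.file_stem().unwrap().to_str().unwrap().replace('-', "_");
                let grounded = path.with_extension("grounded");
                code.push_str(
                    &TEMPLATE
                        .replace("{name}", &name)
                        .replace("{path}", path.to_str().unwrap())
                        .replace("{grounded}", grounded.to_str().unwrap())
                        // collapse the escaped doubled braces last
                        .replace("{{", "{")
                        .replace("}}", "}"),
                );
            }
        }
        fs::write(out, code).unwrap();
    }
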
32 server/Cargo.toml Normal file
@@ -0,0 +1,32 @@
[package]
name = "adf-bdd-server"
version = "0.3.0"
authors = ["Lukas Gerlach <lukas.gerlach@tu-dresden.de>"]
edition = "2021"
homepage = "https://ellmau.github.io/adf-obdd"
repository = "https://github.com/ellmau/adf-obdd"
license = "MIT"
exclude = ["res/", "./flake*", "*.nix", ".envrc", "_config.yml", "tarpaulin-report.*", "*~"]
description = "Offer Solving ADFs as a service"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
adf_bdd = { version="0.3.1", path="../lib", features = ["frontend"] }
actix-web = "4"
actix-cors = "0.6"
actix-files = "0.6"
env_logger = "0.9"
log = "0.4"
serde = "1"
mongodb = "2.4.0"
actix-identity = "0.5.2"
argon2 = "0.5.0"
actix-session = { version="0.7.2", features = ["cookie-session"] }
names = "0.14.0"
futures-util = "0.3.28"
actix-multipart = "0.6.0"

[features]
cors_for_local_development = []
mock_long_computations = []
13 server/README.md Normal file
@@ -0,0 +1,13 @@
# Backend for Webservice

This directory contains the backend for <https://adf-bdd.dev> built using actix.rs.

## Usage

For local development run:

- `docker compose up` to run a MongoDB including a web admin interface
- `MONGODB_URI=mongodb://root:example@localhost:27017/ cargo run -F cors_for_local_development -F mock_long_computations` to start the server, connecting it to the MongoDB and allowing CORS from the frontend (running on a separate development server)

The server listens on `localhost:8080`.
The feature flag `-F mock_long_computations` is optional and just mimics longer computation times by using `std::thread::sleep`. This can be helpful to check how the frontend will behave in such cases.
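
A quick smoke test against the endpoints registered below could look as follows; the field and route names come from `server/src/adf.rs`, while the toy instance (in the usual `s(...)`/`ac(...)` input syntax) is only illustrative:

    # Add a problem as a multipart form; the cookie jar matters because the
    # server logs in a temporary user on first contact.
    curl -c cookies.txt -F "name=my-adf" -F "code=s(a). ac(a,c(v))." \
         -F "parsing=Naive" -F "is_af=false" http://localhost:8080/adf/add

    # Kick off solving with the grounded semantics ...
    curl -b cookies.txt -X PUT -H "Content-Type: application/json" \
         -d '{"strategy":"Ground"}' http://localhost:8080/adf/my-adf/solve

    # ... and poll the problem state, including any still-running tasks.
    curl -b cookies.txt http://localhost:8080/adf/my-adf
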
24 server/docker-compose.yml Normal file
@@ -0,0 +1,24 @@
version: '3.1'

services:

  mongo:
    image: mongo:6
    restart: always
    ports:
      - 27017:27017
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
    volumes:
      - ./mongodb-data:/data/db

  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
      ME_CONFIG_MONGODB_URL: mongodb://root:example@mongo:27017/
848 server/src/adf.rs Normal file
@@ -0,0 +1,848 @@
use std::cell::RefCell;
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, RwLock};
#[cfg(feature = "mock_long_computations")]
use std::time::Duration;

use actix_identity::Identity;
use actix_multipart::form::{tempfile::TempFile, text::Text, MultipartForm};
use actix_web::rt::spawn;
use actix_web::rt::task::spawn_blocking;
use actix_web::rt::time::timeout;
use actix_web::{delete, get, post, put, web, HttpMessage, HttpRequest, HttpResponse, Responder};
use adf_bdd::datatypes::adf::VarContainer;
use adf_bdd::datatypes::{BddNode, Term, Var};
use futures_util::{FutureExt, TryStreamExt};
use mongodb::bson::doc;
use mongodb::bson::{to_bson, Bson};
use mongodb::results::DeleteResult;
use names::{Generator, Name};
use serde::{Deserialize, Serialize};

use adf_bdd::adf::Adf;
use adf_bdd::adfbiodivine::Adf as BdAdf;
use adf_bdd::obdd::Bdd;
use adf_bdd::parser::{AdfParser, Formula};

use crate::config::{AppState, RunningInfo, Task, ADF_COLL, COMPUTE_TIME, DB_NAME, USER_COLL};
use crate::user::{username_exists, User};

use crate::double_labeled_graph::DoubleLabeledGraph;

type Ac = Vec<Term>;
type AcDb = Vec<String>;

#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
pub(crate) enum Parsing {
    Naive,
    Hybrid,
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize)]
pub(crate) enum Strategy {
    Ground,
    Complete,
    Stable,
    StableCountingA,
    StableCountingB,
    StableNogood,
}

#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct AcAndGraph {
    pub(crate) ac: AcDb,
    pub(crate) graph: DoubleLabeledGraph,
}

impl From<AcAndGraph> for Bson {
    fn from(source: AcAndGraph) -> Self {
        to_bson(&source).expect("Serialization should work")
    }
}

#[derive(Clone, Default, Deserialize, Serialize)]
#[serde(tag = "type", content = "content")]
pub(crate) enum OptionWithError<T> {
    Some(T),
    Error(String),
    #[default]
    None,
}

impl<T> OptionWithError<T> {
    fn is_some(&self) -> bool {
        matches!(self, Self::Some(_))
    }
}

impl<T: Serialize> From<OptionWithError<T>> for Bson {
    fn from(source: OptionWithError<T>) -> Self {
        to_bson(&source).expect("Serialization should work")
    }
}

type AcsAndGraphsOpt = OptionWithError<Vec<AcAndGraph>>;

#[derive(Default, Deserialize, Serialize)]
pub(crate) struct AcsPerStrategy {
    pub(crate) parse_only: AcsAndGraphsOpt,
    pub(crate) ground: AcsAndGraphsOpt,
    pub(crate) complete: AcsAndGraphsOpt,
    pub(crate) stable: AcsAndGraphsOpt,
    pub(crate) stable_counting_a: AcsAndGraphsOpt,
    pub(crate) stable_counting_b: AcsAndGraphsOpt,
    pub(crate) stable_nogood: AcsAndGraphsOpt,
}

#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct VarContainerDb {
    names: Vec<String>,
    mapping: HashMap<String, String>,
}

impl From<VarContainer> for VarContainerDb {
    fn from(source: VarContainer) -> Self {
        Self {
            names: source.names().read().unwrap().clone(),
            mapping: source
                .mappings()
                .read()
                .unwrap()
                .iter()
                .map(|(k, v)| (k.clone(), v.to_string()))
                .collect(),
        }
    }
}

impl From<VarContainerDb> for VarContainer {
    fn from(source: VarContainerDb) -> Self {
        Self::from_parser(
            Arc::new(RwLock::new(source.names)),
            Arc::new(RwLock::new(
                source
                    .mapping
                    .into_iter()
                    .map(|(k, v)| (k, v.parse().unwrap()))
                    .collect(),
            )),
        )
    }
}

#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct BddNodeDb {
    var: String,
    lo: String,
    hi: String,
}

impl From<BddNode> for BddNodeDb {
    fn from(source: BddNode) -> Self {
        Self {
            var: source.var().0.to_string(),
            lo: source.lo().0.to_string(),
            hi: source.hi().0.to_string(),
        }
    }
}

impl From<BddNodeDb> for BddNode {
    fn from(source: BddNodeDb) -> Self {
        Self::new(
            Var(source.var.parse().unwrap()),
            Term(source.lo.parse().unwrap()),
            Term(source.hi.parse().unwrap()),
        )
    }
}

type SimplifiedBdd = Vec<BddNodeDb>;

#[derive(Clone, Deserialize, Serialize)]
pub(crate) struct SimplifiedAdf {
    pub(crate) ordering: VarContainerDb,
    pub(crate) bdd: SimplifiedBdd,
    pub(crate) ac: AcDb,
}

impl From<Adf> for SimplifiedAdf {
    fn from(source: Adf) -> Self {
        Self {
            ordering: source.ordering.into(),
            bdd: source.bdd.nodes.into_iter().map(Into::into).collect(),
            ac: source.ac.into_iter().map(|t| t.0.to_string()).collect(),
        }
    }
}

impl From<SimplifiedAdf> for Adf {
    fn from(source: SimplifiedAdf) -> Self {
        let bdd = Bdd::from(
            source
                .bdd
                .into_iter()
                .map(Into::into)
                .collect::<Vec<BddNode>>(),
        );

        Adf::from((
            source.ordering.into(),
            bdd,
            source
                .ac
                .into_iter()
                .map(|t| Term(t.parse().unwrap()))
                .collect(),
        ))
    }
}

type SimplifiedAdfOpt = OptionWithError<SimplifiedAdf>;

#[derive(Deserialize, Serialize)]
pub(crate) struct AdfProblem {
    pub(crate) name: String,
    pub(crate) username: String,
    pub(crate) code: String,
    pub(crate) parsing_used: Parsing,
    #[serde(default)]
    pub(crate) is_af: bool,
    pub(crate) adf: SimplifiedAdfOpt,
    pub(crate) acs_per_strategy: AcsPerStrategy,
}

#[derive(MultipartForm)]
struct AddAdfProblemBodyMultipart {
    name: Text<String>,
    code: Option<Text<String>>, // Either Code or File is set
    file: Option<TempFile>,     // Either Code or File is set
    parsing: Text<Parsing>,
    is_af: Text<bool>, // if it's not an AF then it is an ADF
}

#[derive(Clone)]
struct AddAdfProblemBodyPlain {
    name: String,
    code: String,
    parsing: Parsing,
    is_af: bool, // if it's not an AF then it is an ADF
}

impl TryFrom<AddAdfProblemBodyMultipart> for AddAdfProblemBodyPlain {
    type Error = &'static str;

    fn try_from(source: AddAdfProblemBodyMultipart) -> Result<Self, Self::Error> {
        Ok(Self {
            name: source.name.into_inner(),
            code: source
                .file
                .map(|f| std::io::read_to_string(f.file).expect("TempFile should be readable"))
                .or_else(|| source.code.map(|c| c.into_inner()))
                .and_then(|code| (!code.is_empty()).then_some(code))
                .ok_or("Either a file or the code has to be provided.")?,
            parsing: source.parsing.into_inner(),
            is_af: source.is_af.into_inner(),
        })
    }
}

async fn adf_problem_exists(
    adf_coll: &mongodb::Collection<AdfProblem>,
    name: &str,
    username: &str,
) -> bool {
    adf_coll
        .find_one(doc! { "name": name, "username": username }, None)
        .await
        .ok()
        .flatten()
        .is_some()
}

#[derive(Serialize)]
struct AdfProblemInfo {
    name: String,
    code: String,
    parsing_used: Parsing,
    is_af: bool,
    acs_per_strategy: AcsPerStrategy,
    running_tasks: Vec<Task>,
}

impl AdfProblemInfo {
    fn from_adf_prob_and_tasks(adf: AdfProblem, tasks: &HashSet<RunningInfo>) -> Self {
        AdfProblemInfo {
            name: adf.name.clone(),
            code: adf.code,
            parsing_used: adf.parsing_used,
            is_af: adf.is_af,
            acs_per_strategy: adf.acs_per_strategy,
            running_tasks: tasks
                .iter()
                .filter_map(|t| {
                    (t.adf_name == adf.name && t.username == adf.username).then_some(t.task)
                })
                .collect(),
        }
    }
}

struct AF(Vec<Vec<usize>>);

impl From<AF> for AdfParser {
    fn from(source: AF) -> Self {
        let names: Vec<String> = (0..source.0.len())
            .map(|val| (val + 1).to_string())
            .collect();
        let dict: HashMap<String, usize> = names
            .iter()
            .enumerate()
            .map(|(i, val)| (val.clone(), i))
            .collect();
        let formulae: Vec<Formula> = source
            .0
            .into_iter()
            .map(|attackers| {
                attackers.into_iter().fold(Formula::Top, |acc, attacker| {
                    Formula::And(
                        Box::new(acc),
                        Box::new(Formula::Not(Box::new(Formula::Atom(
                            (attacker + 1).to_string(),
                        )))),
                    )
                })
            })
            .collect();
        let formulanames = names.clone();

        Self {
            namelist: Arc::new(RwLock::new(names)),
            dict: Arc::new(RwLock::new(dict)),
            formulae: RefCell::new(formulae),
            formulaname: RefCell::new(formulanames),
        }
    }
}

fn parse_af(code: String) -> Result<AdfParser, &'static str> {
    let mut lines = code.lines();

    let Some(first_line) = lines.next() else {
        return Err("There must be at least one line in the AF input.");
    };

    let first_line: Vec<_> = first_line.split(" ").collect();
    if first_line[0] != "p" || first_line[1] != "af" {
        return Err("Expected first line to be of the form: p af <n>");
    }

    let Ok(num_arguments) = first_line[2].parse::<usize>() else {
        return Err("Could not convert number of arguments to u32; expected first line to be of the form: p af <n>");
    };

    let attacks_opt: Option<Vec<(usize, usize)>> = lines
        .filter(|line| !line.starts_with('#') && !line.is_empty())
        .map(|line| {
            let mut line = line.split(" ");
            let a = line.next()?;
            let b = line.next()?;
            if line.next().is_some() {
                None
            } else {
                Some((a.parse::<usize>().ok()?, b.parse::<usize>().ok()?))
            }
        })
        .collect();
    let Some(attacks) = attacks_opt else {
        return Err("Line must be of the form: n m");
    };

    // index in outer vector represents attacked element
    let mut is_attacked_by: Vec<Vec<usize>> = vec![vec![]; num_arguments];
    for (a, b) in attacks {
        is_attacked_by[b - 1].push(a - 1); // we normalize names to be zero-indexed
    }

    let hacked_adf_parser = AdfParser::from(AF(is_attacked_by));

    Ok(hacked_adf_parser)
}

#[post("/add")]
async fn add_adf_problem(
    req: HttpRequest,
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
    req_body: MultipartForm<AddAdfProblemBodyMultipart>,
) -> impl Responder {
    let adf_problem_input: AddAdfProblemBodyPlain = match req_body.into_inner().try_into() {
        Ok(input) => input,
        Err(err) => return HttpResponse::BadRequest().body(err),
    };
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);
    let user_coll: mongodb::Collection<User> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(USER_COLL);

    let username = match identity.map(|id| id.id()) {
        None => {
            // Create and log in temporary user
            let gen = Generator::with_naming(Name::Numbered);
            let candidates = gen.take(10);

            let mut name: Option<String> = None;
            for candidate in candidates {
                if name.is_some() {
                    continue;
                }

                if !(username_exists(&user_coll, &candidate).await) {
                    name = Some(candidate);
                }
            }

            let username = match name {
                Some(name) => name,
                None => {
                    return HttpResponse::InternalServerError().body("Could not generate new name.")
                }
            };

            match user_coll
                .insert_one(
                    User {
                        username: username.clone(),
                        password: None,
                    },
                    None,
                )
                .await
            {
                Ok(_) => (),
                Err(err) => return HttpResponse::InternalServerError().body(err.to_string()),
            }

            Identity::login(&req.extensions(), username.clone()).unwrap();

            username
        }
        Some(Err(err)) => return HttpResponse::InternalServerError().body(err.to_string()),
        Some(Ok(username)) => username,
    };

    let problem_name = if !adf_problem_input.name.is_empty() {
        if adf_problem_exists(&adf_coll, &adf_problem_input.name, &username).await {
            return HttpResponse::Conflict()
                .body("ADF Problem with that name already exists. Please pick another one!");
        }

        adf_problem_input.name.clone()
    } else {
        let gen = Generator::with_naming(Name::Numbered);
        let candidates = gen.take(10);

        let mut name: Option<String> = None;
        for candidate in candidates {
            if name.is_some() {
                continue;
            }

            if !(adf_problem_exists(&adf_coll, &candidate, &username).await) {
                name = Some(candidate);
            }
        }

        match name {
            Some(name) => name,
            None => {
                return HttpResponse::InternalServerError().body("Could not generate new name.")
            }
        }
    };

    let adf_problem: AdfProblem = AdfProblem {
        name: problem_name.clone(),
        username: username.clone(),
        code: adf_problem_input.code.clone(),
        parsing_used: adf_problem_input.parsing,
        is_af: adf_problem_input.is_af,
        adf: SimplifiedAdfOpt::None,
        acs_per_strategy: AcsPerStrategy::default(),
    };

    let result = adf_coll.insert_one(&adf_problem, None).await;

    if let Err(err) = result {
        return HttpResponse::InternalServerError()
            .body(format!("Could not create Database entry. Error: {err}"));
    }

    let username_clone = username.clone();
    let problem_name_clone = problem_name.clone();

    let adf_fut = timeout(
        COMPUTE_TIME,
        spawn_blocking(move || {
            let running_info = RunningInfo {
                username: username_clone,
                adf_name: problem_name_clone,
                task: Task::Parse,
            };

            app_state
                .currently_running
                .lock()
                .unwrap()
                .insert(running_info.clone());

            #[cfg(feature = "mock_long_computations")]
            std::thread::sleep(Duration::from_secs(20));

            let (parser, parse_result) = {
                if adf_problem_input.is_af {
                    parse_af(adf_problem_input.code)
                        .map(|p| (p, Ok(())))
                        .unwrap_or_else(|e| (AdfParser::default(), Err(e)))
                } else {
                    let parser = AdfParser::default();
                    let parse_result = parser.parse()(&adf_problem_input.code)
                        .map(|_| ())
                        .map_err(|_| "ADF could not be parsed, double check your input!");

                    (parser, parse_result)
                }
            };

            let result = parse_result.map(|_| {
                let lib_adf = match adf_problem_input.parsing {
                    Parsing::Naive => Adf::from_parser(&parser),
                    Parsing::Hybrid => {
                        let bd_adf = BdAdf::from_parser(&parser);
                        bd_adf.hybrid_step_opt(false)
                    }
                };

                let ac_and_graph = AcAndGraph {
                    ac: lib_adf.ac.iter().map(|t| t.0.to_string()).collect(),
                    graph: DoubleLabeledGraph::from_adf_and_ac(&lib_adf, None),
                };

                (SimplifiedAdf::from(lib_adf), ac_and_graph)
            });

            app_state
                .currently_running
                .lock()
                .unwrap()
                .remove(&running_info);

            result
        }),
    );

    spawn(adf_fut.then(move |adf_res| async move {
        let (adf, ac_and_graph): (SimplifiedAdfOpt, AcsAndGraphsOpt) = match adf_res {
            Err(err) => (
                SimplifiedAdfOpt::Error(err.to_string()),
                AcsAndGraphsOpt::Error(err.to_string()),
            ),
            Ok(Err(err)) => (
                SimplifiedAdfOpt::Error(err.to_string()),
                AcsAndGraphsOpt::Error(err.to_string()),
            ),
            Ok(Ok(Err(err))) => (
                SimplifiedAdfOpt::Error(err.to_string()),
                AcsAndGraphsOpt::Error(err.to_string()),
            ),
            Ok(Ok(Ok((adf, ac_and_graph)))) => (
                SimplifiedAdfOpt::Some(adf),
                AcsAndGraphsOpt::Some(vec![ac_and_graph]),
            ),
        };

        let result = adf_coll
            .update_one(
                doc! { "name": problem_name, "username": username },
                doc! { "$set": { "adf": &adf, "acs_per_strategy.parse_only": &ac_and_graph } },
                None,
            )
            .await;

        if let Err(err) = result {
            log::error!("{err}");
        }
    }));

    HttpResponse::Ok().body("Parsing started...")
}

#[derive(Deserialize)]
struct SolveAdfProblemBody {
    strategy: Strategy,
}

#[put("/{problem_name}/solve")]
async fn solve_adf_problem(
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
    path: web::Path<String>,
    req_body: web::Json<SolveAdfProblemBody>,
) -> impl Responder {
    let problem_name = path.into_inner();
    let adf_problem_input: SolveAdfProblemBody = req_body.into_inner();
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);

    let username = match identity.map(|id| id.id()) {
        Option::None => {
            return HttpResponse::Unauthorized().body("You need to login to add an ADF problem.")
        }
        Some(Err(err)) => return HttpResponse::InternalServerError().body(err.to_string()),
        Some(Ok(username)) => username,
    };

    let adf_problem = match adf_coll
        .find_one(doc! { "name": &problem_name, "username": &username }, None)
        .await
    {
        Err(err) => return HttpResponse::InternalServerError().body(err.to_string()),
        Ok(Option::None) => {
            return HttpResponse::NotFound()
                .body(format!("ADF problem with name {problem_name} not found."))
        }
        Ok(Some(prob)) => prob,
    };

    let simp_adf: SimplifiedAdf = match adf_problem.adf {
        SimplifiedAdfOpt::None => {
            return HttpResponse::BadRequest().body("The ADF problem has not been parsed yet.")
        }
        SimplifiedAdfOpt::Error(err) => {
            return HttpResponse::BadRequest().body(format!(
                "The ADF problem could not be parsed. Update it and try again. Error: {err}"
            ))
        }
        SimplifiedAdfOpt::Some(adf) => adf,
    };

    let has_been_solved = match adf_problem_input.strategy {
        Strategy::Complete => adf_problem.acs_per_strategy.complete.is_some(),
        Strategy::Ground => adf_problem.acs_per_strategy.ground.is_some(),
        Strategy::Stable => adf_problem.acs_per_strategy.stable.is_some(),
        Strategy::StableCountingA => adf_problem.acs_per_strategy.stable_counting_a.is_some(),
        Strategy::StableCountingB => adf_problem.acs_per_strategy.stable_counting_b.is_some(),
        Strategy::StableNogood => adf_problem.acs_per_strategy.stable_nogood.is_some(),
    };

    let username_clone = username.clone();
    let problem_name_clone = problem_name.clone();

    let running_info = RunningInfo {
        username: username_clone,
        adf_name: problem_name_clone,
        task: Task::Solve(adf_problem_input.strategy),
    };

    // NOTE: we could also return the result here instead of throwing an error but I think the canonical way should just be to call the get endpoint for the problem.
    if has_been_solved
        || app_state
            .currently_running
            .lock()
            .unwrap()
            .contains(&running_info)
    {
        return HttpResponse::Conflict()
            .body("The ADF problem has already been solved with this strategy. You can just get the solution from the problem data directly.");
    }

    let acs_and_graphs_fut = timeout(
        COMPUTE_TIME,
        spawn_blocking(move || {
            app_state
                .currently_running
                .lock()
                .unwrap()
                .insert(running_info.clone());

            #[cfg(feature = "mock_long_computations")]
            std::thread::sleep(Duration::from_secs(20));

            let mut adf: Adf = simp_adf.into();

            let acs: Vec<Ac> = match adf_problem_input.strategy {
                Strategy::Complete => adf.complete().collect(),
                Strategy::Ground => vec![adf.grounded()],
                Strategy::Stable => adf.stable().collect(),
                // TODO: INPUT VALIDATION: only allow this for hybrid parsing
                Strategy::StableCountingA => adf.stable_count_optimisation_heu_a().collect(),
                // TODO: INPUT VALIDATION: only allow this for hybrid parsing
                Strategy::StableCountingB => adf.stable_count_optimisation_heu_b().collect(),
                // TODO: support more than just default heuristics
                Strategy::StableNogood => adf
                    .stable_nogood(adf_bdd::adf::heuristics::Heuristic::default())
                    .collect(),
            };

            let acs_and_graphs: Vec<AcAndGraph> = acs
                .iter()
                .map(|ac| AcAndGraph {
                    ac: ac.iter().map(|t| t.0.to_string()).collect(),
                    graph: DoubleLabeledGraph::from_adf_and_ac(&adf, Some(ac)),
                })
                .collect();

            app_state
                .currently_running
                .lock()
                .unwrap()
                .remove(&running_info);

            acs_and_graphs
        }),
    );

    spawn(acs_and_graphs_fut.then(move |acs_and_graphs_res| async move {
        let acs_and_graphs_enum: AcsAndGraphsOpt = match acs_and_graphs_res {
            Err(err) => AcsAndGraphsOpt::Error(err.to_string()),
            Ok(Err(err)) => AcsAndGraphsOpt::Error(err.to_string()),
            Ok(Ok(acs_and_graphs)) => AcsAndGraphsOpt::Some(acs_and_graphs),
        };

        let result = adf_coll.update_one(doc! { "name": problem_name, "username": username }, match adf_problem_input.strategy {
            Strategy::Complete => doc! { "$set": { "acs_per_strategy.complete": &acs_and_graphs_enum } },
            Strategy::Ground => doc! { "$set": { "acs_per_strategy.ground": &acs_and_graphs_enum } },
            Strategy::Stable => doc! { "$set": { "acs_per_strategy.stable": &acs_and_graphs_enum } },
            Strategy::StableCountingA => doc! { "$set": { "acs_per_strategy.stable_counting_a": &acs_and_graphs_enum } },
            Strategy::StableCountingB => doc! { "$set": { "acs_per_strategy.stable_counting_b": &acs_and_graphs_enum } },
            Strategy::StableNogood => doc! { "$set": { "acs_per_strategy.stable_nogood": &acs_and_graphs_enum } },
        }, None).await;

        if let Err(err) = result {
            log::error!("{err}");
        }
    }));

    HttpResponse::Ok().body("Solving started...")
}

#[get("/{problem_name}")]
async fn get_adf_problem(
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
    path: web::Path<String>,
) -> impl Responder {
    let problem_name = path.into_inner();
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);

    let username = match identity.map(|id| id.id()) {
        Option::None => {
            return HttpResponse::Unauthorized().body("You need to login to get an ADF problem.")
        }
        Some(Err(err)) => return HttpResponse::InternalServerError().body(err.to_string()),
        Some(Ok(username)) => username,
    };

    let adf_problem = match adf_coll
        .find_one(doc! { "name": &problem_name, "username": &username }, None)
        .await
    {
        Err(err) => return HttpResponse::InternalServerError().body(err.to_string()),
        Ok(Option::None) => {
            return HttpResponse::NotFound()
                .body(format!("ADF problem with name {problem_name} not found."))
        }
        Ok(Some(prob)) => prob,
    };

    HttpResponse::Ok().json(AdfProblemInfo::from_adf_prob_and_tasks(
        adf_problem,
        &app_state.currently_running.lock().unwrap(),
    ))
}

#[delete("/{problem_name}")]
async fn delete_adf_problem(
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
    path: web::Path<String>,
) -> impl Responder {
    let problem_name = path.into_inner();
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);

    let username = match identity.map(|id| id.id()) {
        Option::None => {
            return HttpResponse::Unauthorized().body("You need to login to get an ADF problem.")
        }
        Some(Err(err)) => return HttpResponse::InternalServerError().body(err.to_string()),
        Some(Ok(username)) => username,
    };

    match adf_coll
        .delete_one(doc! { "name": &problem_name, "username": &username }, None)
        .await
    {
        Ok(DeleteResult {
            deleted_count: 0, ..
        }) => HttpResponse::InternalServerError().body("Adf Problem could not be deleted."),
        Ok(DeleteResult {
            deleted_count: 1, ..
        }) => HttpResponse::Ok().body("Adf Problem deleted."),
        Ok(_) => {
            unreachable!("delete_one removes at most one entry so all cases are covered already")
        }
        Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
    }
}

#[get("/")]
async fn get_adf_problems_for_user(
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
) -> impl Responder {
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);

    let username = match identity.map(|id| id.id()) {
        Option::None => {
            return HttpResponse::Unauthorized().body("You need to login to get an ADF problem.")
        }
        Some(Err(err)) => return HttpResponse::InternalServerError().body(err.to_string()),
        Some(Ok(username)) => username,
    };

    let adf_problem_cursor = match adf_coll.find(doc! { "username": &username }, None).await {
        Err(err) => return HttpResponse::InternalServerError().body(err.to_string()),
        Ok(cursor) => cursor,
    };

    let adf_problems: Vec<AdfProblemInfo> = match adf_problem_cursor
        .map_ok(|adf_problem| {
            AdfProblemInfo::from_adf_prob_and_tasks(
                adf_problem,
                &app_state.currently_running.lock().unwrap(),
            )
        })
        .try_collect()
        .await
    {
        Err(err) => return HttpResponse::InternalServerError().body(err.to_string()),
        Ok(probs) => probs,
    };

    HttpResponse::Ok().json(adf_problems)
}
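
To make the `parse_af` helper above concrete: it accepts a DIMACS-style AF description whose first line is `p af <n>`, followed by one `a b` pair per attack (1-based argument names, normalized to 0-based internally); lines starting with `#` and empty lines are skipped. A tiny illustrative instance:

    p af 3
    # 1 attacks 2, 2 attacks 3, 3 attacks itself
    1 2
    2 3
    3 3

Via the `From<AF>` impl, each argument then receives as acceptance condition the conjunction of the negations of its attackers folded onto `Formula::Top`; argument 3 above, for example, ends up with the condition and(and(T, not(2)), not(3)).
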
37 server/src/config.rs Normal file
@@ -0,0 +1,37 @@
use std::collections::HashSet;
use std::sync::Mutex;
use std::time::Duration;

use mongodb::Client;
use serde::Serialize;

use crate::adf::Strategy;

pub(crate) const COOKIE_DURATION: actix_web::cookie::time::Duration =
    actix_web::cookie::time::Duration::minutes(30);
pub(crate) const COMPUTE_TIME: Duration = Duration::from_secs(120);

pub(crate) const ASSET_DIRECTORY: &str = "./assets";

pub(crate) const DB_NAME: &str = "adf-obdd";
pub(crate) const USER_COLL: &str = "users";
pub(crate) const ADF_COLL: &str = "adf-problems";

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize)]
#[serde(tag = "type", content = "content")]
pub(crate) enum Task {
    Parse,
    Solve(Strategy),
}

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) struct RunningInfo {
    pub(crate) username: String,
    pub(crate) adf_name: String,
    pub(crate) task: Task,
}

pub(crate) struct AppState {
    pub(crate) mongodb_client: Client,
    pub(crate) currently_running: Mutex<HashSet<RunningInfo>>,
}
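
`COMPUTE_TIME` above is the budget that the handlers in `server/src/adf.rs` give each parsing or solving job via `timeout` wrapped around `spawn_blocking`. A minimal standalone sketch of that pattern (the closure body is a stand-in for the real solving work):

    use actix_web::rt::{task::spawn_blocking, time::timeout};
    use std::time::Duration;

    #[actix_web::main]
    async fn main() {
        let budget = Duration::from_secs(120); // mirrors COMPUTE_TIME
        // timeout yields Err(Elapsed) if the join future does not finish in time;
        // note that the blocking task itself keeps running, only the await is abandoned.
        match timeout(budget, spawn_blocking(|| 21 * 2)).await {
            Err(_elapsed) => eprintln!("computation exceeded the budget"),
            Ok(Err(join_err)) => eprintln!("worker panicked: {join_err}"),
            Ok(Ok(answer)) => println!("answer: {answer}"),
        }
    }
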
118 server/src/double_labeled_graph.rs Normal file
@@ -0,0 +1,118 @@
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};

use adf_bdd::adf::Adf;
use adf_bdd::datatypes::{Term, Var};

#[derive(Clone, Deserialize, Serialize, Debug)]
/// This is a DTO for the graph output
pub struct DoubleLabeledGraph {
    // number of nodes equals the number of node labels
    // nodes implicitly have their index as their ID
    node_labels: HashMap<String, String>,
    // every node gets this label containing multiple entries (it might be empty)
    tree_root_labels: HashMap<String, Vec<String>>,
    lo_edges: Vec<(String, String)>,
    hi_edges: Vec<(String, String)>,
}

impl DoubleLabeledGraph {
    pub fn from_adf_and_ac(adf: &Adf, ac: Option<&Vec<Term>>) -> Self {
        let ac: &Vec<Term> = match ac {
            Some(ac) => ac,
            None => &adf.ac,
        };

        let mut node_indices: HashSet<usize> = HashSet::new();
        let mut new_node_indices: HashSet<usize> = ac.iter().map(|term| term.value()).collect();

        while !new_node_indices.is_empty() {
            node_indices = node_indices.union(&new_node_indices).copied().collect();
            new_node_indices = HashSet::new();

            for node_index in &node_indices {
                let lo_node_index = adf.bdd.nodes[*node_index].lo().value();
                if !node_indices.contains(&lo_node_index) {
                    new_node_indices.insert(lo_node_index);
                }

                let hi_node_index = adf.bdd.nodes[*node_index].hi().value();
                if !node_indices.contains(&hi_node_index) {
                    new_node_indices.insert(hi_node_index);
                }
            }
        }

        let node_labels: HashMap<String, String> = adf
            .bdd
            .nodes
            .iter()
            .enumerate()
            .filter(|(i, _)| node_indices.contains(i))
            .map(|(i, &node)| {
                let value_part = match node.var() {
                    Var::TOP => "TOP".to_string(),
                    Var::BOT => "BOT".to_string(),
                    _ => adf.ordering.name(node.var()).expect(
                        "name for each var should exist; special cases are handled separately",
                    ),
                };

                (i.to_string(), value_part)
            })
            .collect();

        let tree_root_labels_with_usize: HashMap<usize, Vec<String>> = ac.iter().enumerate().fold(
            adf.bdd
                .nodes
                .iter()
                .enumerate()
                .filter(|(i, _)| node_indices.contains(i))
                .map(|(i, _)| (i, vec![]))
                .collect(),
            |mut acc, (root_for, root_node)| {
                acc.get_mut(&root_node.value())
                    .expect("we know that the index will be in the map")
                    .push(adf.ordering.name(Var(root_for)).expect(
                        "name for each var should exist; special cases are handled separately",
                    ));

                acc
            },
        );

        let tree_root_labels: HashMap<String, Vec<String>> = tree_root_labels_with_usize
            .into_iter()
            .map(|(i, vec)| (i.to_string(), vec))
            .collect();

        let lo_edges: Vec<(String, String)> = adf
            .bdd
            .nodes
            .iter()
            .enumerate()
            .filter(|(i, _)| node_indices.contains(i))
            .filter(|(_, node)| ![Var::TOP, Var::BOT].contains(&node.var()))
            .map(|(i, &node)| (i, node.lo().value()))
            .map(|(i, v)| (i.to_string(), v.to_string()))
            .collect();

        let hi_edges: Vec<(String, String)> = adf
            .bdd
            .nodes
            .iter()
            .enumerate()
            .filter(|(i, _)| node_indices.contains(i))
            .filter(|(_, node)| ![Var::TOP, Var::BOT].contains(&node.var()))
            .map(|(i, &node)| (i, node.hi().value()))
            .map(|(i, v)| (i.to_string(), v.to_string()))
            .collect();

        DoubleLabeledGraph {
            node_labels,
            tree_root_labels,
            lo_edges,
            hi_edges,
        }
    }
}
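
Since the struct derives plain serde (de)serialization, the DTO reaches the frontend in this JSON shape (field names from the struct above; the concrete values are only illustrative):

    {
      "node_labels": { "0": "BOT", "1": "TOP", "2": "a" },
      "tree_root_labels": { "0": [], "1": ["b"], "2": ["a"] },
      "lo_edges": [["2", "0"]],
      "hi_edges": [["2", "1"]]
    }
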
116
server/src/main.rs
Normal file
@ -0,0 +1,116 @@
use std::collections::HashSet;
use std::sync::Mutex;

use actix_files as fs;
use actix_identity::IdentityMiddleware;
use actix_session::config::PersistentSession;
use actix_session::storage::CookieSessionStore;
use actix_session::SessionMiddleware;
use actix_web::cookie::Key;
use actix_web::dev::{fn_service, ServiceRequest, ServiceResponse};
use actix_web::{web, App, HttpServer};
use fs::NamedFile;
use mongodb::Client;

#[cfg(feature = "cors_for_local_development")]
use actix_cors::Cors;

mod adf;
mod config;
mod double_labeled_graph;
mod user;

use adf::{
    add_adf_problem, delete_adf_problem, get_adf_problem, get_adf_problems_for_user,
    solve_adf_problem,
};
use config::{AppState, ASSET_DIRECTORY, COOKIE_DURATION};
use user::{
    create_username_index, delete_account, login, logout, register, update_user, user_info,
};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    env_logger::builder()
        .filter_level(log::LevelFilter::Debug)
        .init();

    // setup mongodb
    let mongodb_uri =
        std::env::var("MONGODB_URI").unwrap_or_else(|_| "mongodb://localhost:27017".into());
    let client = Client::with_uri_str(mongodb_uri)
        .await
        .expect("failed to connect to mongodb");
    create_username_index(&client).await;

    // cookie secret key
    let secret_key = Key::generate();

    // needs to be created outside of the HttpServer closure so that it is only created once!
    let app_data = web::Data::new(AppState {
        mongodb_client: client.clone(),
        currently_running: Mutex::new(HashSet::new()),
    });

    HttpServer::new(move || {
        let app = App::new();

        #[cfg(feature = "cors_for_local_development")]
        let cors = Cors::default()
            .allowed_origin("http://localhost:1234")
            .allow_any_method()
            .allow_any_header()
            .supports_credentials()
            .max_age(3600);

        #[cfg(feature = "cors_for_local_development")]
        let app = app.wrap(cors);

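        // Secure cookies only work over HTTPS, so they are disabled when the
        // local-development CORS feature is enabled.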
        #[cfg(feature = "cors_for_local_development")]
        let cookie_secure = false;
        #[cfg(not(feature = "cors_for_local_development"))]
        let cookie_secure = true;

        app.app_data(app_data.clone())
            .wrap(IdentityMiddleware::default())
            .wrap(
                SessionMiddleware::builder(CookieSessionStore::default(), secret_key.clone())
                    .cookie_name("adf-obdd-service-auth".to_owned())
                    .cookie_secure(cookie_secure)
                    .session_lifecycle(PersistentSession::default().session_ttl(COOKIE_DURATION))
                    .build(),
            )
            .service(
                web::scope("/users")
                    .service(register)
                    .service(delete_account)
                    .service(login)
                    .service(logout)
                    .service(user_info)
                    .service(update_user),
            )
            .service(
                web::scope("/adf")
                    .service(add_adf_problem)
                    .service(solve_adf_problem)
                    .service(get_adf_problem)
                    .service(delete_adf_problem)
                    .service(get_adf_problems_for_user),
            )
            // this must be last so that it does not override anything
            .service(
                fs::Files::new("/", ASSET_DIRECTORY)
                    .index_file("index.html")
                    .default_handler(fn_service(|req: ServiceRequest| async {
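                        // For any path that does not match a static file,
                        // respond with index.html instead.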
                        let (req, _) = req.into_parts();
                        let file =
                            NamedFile::open_async(format!("{ASSET_DIRECTORY}/index.html")).await?;
                        let res = file.into_response(&req);
                        Ok(ServiceResponse::new(req, res))
                    })),
            )
    })
    .bind(("0.0.0.0", 8080))?
    .run()
    .await
}
365
server/src/user.rs
Normal file
@ -0,0 +1,365 @@
use actix_identity::Identity;
use actix_web::{delete, get, post, put, web, HttpMessage, HttpRequest, HttpResponse, Responder};
use argon2::password_hash::rand_core::OsRng;
use argon2::password_hash::SaltString;
use argon2::{Argon2, PasswordHash, PasswordHasher, PasswordVerifier};
use mongodb::results::{DeleteResult, UpdateResult};
use mongodb::{bson::doc, options::IndexOptions, Client, IndexModel};
use serde::{Deserialize, Serialize};

use crate::adf::AdfProblem;
use crate::config::{AppState, ADF_COLL, DB_NAME, USER_COLL};

#[derive(Deserialize, Serialize)]
pub(crate) struct User {
    pub(crate) username: String,
    pub(crate) password: Option<String>, // NOTE: Password being None indicates a temporary user
}

#[derive(Deserialize, Serialize)]
struct UserPayload {
    username: String,
    password: String,
}

#[derive(Deserialize, Serialize)]
struct UserInfo {
    username: String,
    temp: bool,
}

// Creates an index on the "username" field to force the values to be unique.
pub(crate) async fn create_username_index(client: &Client) {
    let options = IndexOptions::builder().unique(true).build();
    let model = IndexModel::builder()
        .keys(doc! { "username": 1 })
        .options(options)
        .build();
    client
        .database(DB_NAME)
        .collection::<User>(USER_COLL)
        .create_index(model, None)
        .await
        .expect("creating an index should succeed");
}

pub(crate) async fn username_exists(user_coll: &mongodb::Collection<User>, username: &str) -> bool {
    user_coll
        .find_one(doc! { "username": username }, None)
        .await
        .ok()
        .flatten()
        .is_some()
}

// Add new user
#[post("/register")]
async fn register(app_state: web::Data<AppState>, user: web::Json<UserPayload>) -> impl Responder {
    let mut user: UserPayload = user.into_inner();

    if user.username.is_empty() || user.password.is_empty() {
        return HttpResponse::BadRequest().body("Username and Password need to be set!");
    }

    let user_coll = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(USER_COLL);

    if username_exists(&user_coll, &user.username).await {
        return HttpResponse::Conflict()
            .body("Username is already taken. Please pick another one!");
    }

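    // Hash the password with a fresh per-user salt before storing it.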
    let pw = &user.password;
    let salt = SaltString::generate(&mut OsRng);
    let hashed_pw = Argon2::default()
        .hash_password(pw.as_bytes(), &salt)
        .expect("Error while hashing password!")
        .to_string();

    user.password = hashed_pw;

    let result = user_coll
        .insert_one(
            User {
                username: user.username,
                password: Some(user.password),
            },
            None,
        )
        .await;
    match result {
        Ok(_) => HttpResponse::Ok().body("Registration successful!"),
        Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
    }
}

// Remove user
#[delete("/delete")]
async fn delete_account(
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
) -> impl Responder {
    let user_coll: mongodb::Collection<User> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(USER_COLL);
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);

    match identity {
        None => HttpResponse::Unauthorized().body("You are not logged in."),
        Some(id) => match id.id() {
            Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
            Ok(username) => {
                // Delete all adfs created by user
                match adf_coll
                    .delete_many(doc! { "username": &username }, None)
                    .await
                {
                    Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
                    Ok(DeleteResult {
                        deleted_count: _, ..
                    }) => {
                        // Delete actual user
                        match user_coll
                            .delete_one(doc! { "username": &username }, None)
                            .await
                        {
                            Ok(DeleteResult {
                                deleted_count: 0, ..
                            }) => HttpResponse::InternalServerError()
                                .body("Account could not be deleted."),
                            Ok(DeleteResult {
                                deleted_count: 1, ..
                            }) => {
                                id.logout();
                                HttpResponse::Ok().body("Account deleted.")
                            }
                            Ok(_) => unreachable!(
                                "delete_one removes at most one entry so all cases are covered already"
                            ),
                            Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
                        }
                    }
                }
            }
        },
    }
}

// Login
#[post("/login")]
async fn login(
    req: HttpRequest,
    app_state: web::Data<AppState>,
    user_data: web::Json<UserPayload>,
) -> impl Responder {
    let username = &user_data.username;
    let pw = &user_data.password;

    if username.is_empty() || pw.is_empty() {
        return HttpResponse::BadRequest().body("Username and Password need to be set!");
    }

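    // Look up the user and verify the submitted password against the stored
    // Argon2 hash.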
    let user_coll: mongodb::Collection<User> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(USER_COLL);
    match user_coll
        .find_one(doc! { "username": username }, None)
        .await
    {
        Ok(Some(user)) => {
            let stored_password = match &user.password {
                None => return HttpResponse::BadRequest().body("Invalid username or password"), // NOTE: login as temporary user is not allowed
                Some(password) => password,
            };

            let stored_hash = PasswordHash::new(stored_password).unwrap();
            let pw_valid = Argon2::default()
                .verify_password(pw.as_bytes(), &stored_hash)
                .is_ok();

            if pw_valid {
                Identity::login(&req.extensions(), username.to_string()).unwrap();
                HttpResponse::Ok().body("Login successful!")
            } else {
                HttpResponse::BadRequest().body("Invalid username or password")
            }
        }
        Ok(None) => HttpResponse::NotFound().body(format!(
            "No user found with username {}",
            &user_data.username
        )),
        Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
    }
}

#[delete("/logout")]
|
||||||
|
async fn logout(app_state: web::Data<AppState>, id: Option<Identity>) -> impl Responder {
|
||||||
|
let user_coll: mongodb::Collection<User> = app_state
|
||||||
|
.mongodb_client
|
||||||
|
.database(DB_NAME)
|
||||||
|
.collection(USER_COLL);
|
||||||
|
|
||||||
|
    match id {
        None => HttpResponse::Unauthorized().body("You are not logged in."),
        Some(id) => match id.id() {
            Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
            Ok(username) => {
                let user: User = match user_coll
                    .find_one(doc! { "username": &username }, None)
                    .await
                {
                    Ok(Some(user)) => user,
                    Ok(None) => {
                        return HttpResponse::NotFound()
                            .body(format!("No user found with username {}", &username))
                    }
                    Err(err) => return HttpResponse::InternalServerError().body(err.to_string()),
                };

                if user.password.is_none() {
                    HttpResponse::BadRequest().body("You are logged in as a temporary user, so we won't log you out; you would not be able to log in again. If you want to be able to log in again, set a password. Otherwise your session will expire automatically at some point.")
                } else {
                    id.logout();
                    HttpResponse::Ok().body("Logout successful!")
                }
            }
        },
    }
}

// Get current user
#[get("/info")]
async fn user_info(app_state: web::Data<AppState>, identity: Option<Identity>) -> impl Responder {
    let user_coll: mongodb::Collection<User> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(USER_COLL);

    match identity {
        None => {
            HttpResponse::Unauthorized().body("You need to log in to get your account information.")
        }
        Some(id) => match id.id() {
            Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
            Ok(username) => {
                match user_coll
                    .find_one(doc! { "username": &username }, None)
                    .await
                {
                    Ok(Some(user)) => {
                        let info = UserInfo {
                            username: user.username,
                            temp: user.password.is_none(),
                        };

                        HttpResponse::Ok().json(info)
                    }
                    Ok(None) => {
                        id.logout();
                        HttpResponse::NotFound().body("Logged in user does not exist anymore.")
                    }
                    Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
                }
            }
        },
    }
}

// Update current user
#[put("/update")]
async fn update_user(
    req: HttpRequest,
    app_state: web::Data<AppState>,
    identity: Option<Identity>,
    user: web::Json<UserPayload>,
) -> impl Responder {
    let mut user: UserPayload = user.into_inner();

    if user.username.is_empty() || user.password.is_empty() {
        return HttpResponse::BadRequest().body("Username and Password need to be set!");
    }

    let user_coll = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(USER_COLL);
    let adf_coll: mongodb::Collection<AdfProblem> = app_state
        .mongodb_client
        .database(DB_NAME)
        .collection(ADF_COLL);

    match identity {
        None => {
            HttpResponse::Unauthorized().body("You need to log in to update your account information.")
        }
        Some(id) => match id.id() {
            Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
            Ok(username) => {
                if user.username != username && username_exists(&user_coll, &user.username).await {
                    return HttpResponse::Conflict()
                        .body("Username is already taken. Please pick another one!");
                }

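                // Hash the new password with a fresh salt, just like during
                // registration.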
                let pw = &user.password;
                let salt = SaltString::generate(&mut OsRng);
                let hashed_pw = Argon2::default()
                    .hash_password(pw.as_bytes(), &salt)
                    .expect("Error while hashing password!")
                    .to_string();

                user.password = hashed_pw;

                let result = user_coll
                    .replace_one(
                        doc! { "username": &username },
                        User {
                            username: user.username.clone(),
                            password: Some(user.password),
                        },
                        None,
                    )
                    .await;
                match result {
                    Ok(UpdateResult {
                        modified_count: 0, ..
                    }) => HttpResponse::InternalServerError().body("Account could not be updated."),
                    Ok(UpdateResult {
                        modified_count: 1, ..
                    }) => {
                        // re-login with new username
                        Identity::login(&req.extensions(), user.username.clone()).unwrap();

                        // update all adf problems of user
                        match adf_coll
                            .update_many(
                                doc! { "username": &username },
                                doc! { "$set": { "username": &user.username } },
                                None,
                            )
                            .await
                        {
                            Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
                            Ok(UpdateResult {
                                modified_count: _, ..
                            }) => HttpResponse::Ok().json(UserInfo {
                                username: user.username,
                                temp: false,
                            }),
                        }
                    }
                    Ok(_) => unreachable!(
                        "replace_one replaces at most one entry so all cases are covered already"
                    ),
                    Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
                }
            }
        },
    }
}