1
0
mirror of https://github.com/ellmau/adf-obdd.git synced 2025-12-19 09:29:36 +01:00

Add Basic Binary for ICCMA 2025

This commit is contained in:
monsterkrampe 2025-01-09 11:55:33 +01:00
parent ca6c94f2e5
commit ff575f9c4d
No known key found for this signature in database
GPG Key ID: B8ADC1F5A5CE5057
13 changed files with 604 additions and 27 deletions

10
Cargo.lock generated
View File

@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
version = 4
[[package]]
name = "actix-codec"
@ -321,6 +321,14 @@ dependencies = [
"strum",
]
[[package]]
name = "adf-bdd-iccma-2025-bin"
version = "0.3.0-dev"
dependencies = [
"adf_bdd",
"clap 4.5.32",
]
[[package]]
name = "adf-bdd-server"
version = "0.3.0"

View File

@ -1,6 +1,7 @@
[workspace]
members=[ "lib", "bin", "server" ]
members=[ "lib", "bin", "server" , "iccma-2025-bin"]
default-members = [ "lib" ]
resolver = "2"
[profile.release]
lto = "fat"

10
iccma-2025-bin/Cargo.toml Normal file
View File

@ -0,0 +1,10 @@
[package]
name = "adf-bdd-iccma-2025-bin"
version = "0.3.0-dev"
authors = ["Stefan Ellmauthaler <stefan.ellmauthaler@tu-dresden.de>", "Lukas Gerlach <lukas.gerlach@tu-dresden.de>"]
edition = "2021"
[dependencies]
adf_bdd = { version="0.3.1", path="../lib", default-features = false }
clap = {version = "4.3.0", features = [ "derive", "cargo", "env" ]}

61
iccma-2025-bin/benchmarks/flake.lock generated Normal file
View File

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1742751704,
"narHash": "sha256-rBfc+H1dDBUQ2mgVITMGBPI1PGuCznf9rcWX/XIULyE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f0946fa5f1fb876a9dc2e1850d9d3a4e3f914092",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

View File

@ -0,0 +1,21 @@
{
  # Nix flake providing a dev shell for the benchmark tooling in this
  # directory; the only extra dependency is python-sat for the verifier.
  description = "ICCMA Benchmarks";
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
  inputs.flake-utils.url = "github:numtide/flake-utils";
  outputs = { self, nixpkgs, flake-utils }:
    flake-utils.lib.eachDefaultSystem
      (system:
        let pkgs = nixpkgs.legacyPackages.${system}; in
        {
          devShells.default = pkgs.mkShell {
            packages = [
              # Python 3 with the python-sat package (imported as `pysat`)
              (pkgs.python3.withPackages (python-pkgs: [
                python-pkgs.python-sat
              ]))
            ];
          };
        }
      );
}

View File

@ -0,0 +1,8 @@
p af 5
# this is a comment
1 2
2 4
4 5
5 4
5 5

View File

@ -0,0 +1 @@
3

View File

@ -0,0 +1 @@
w 1 3 4

View File

@ -0,0 +1,216 @@
"""
Copyright <2023> <Andreas Niskanen, University of Helsinki>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
"""
!!!
!!! IMPORTANT!!!
!!!
This file is NOT the original from ICCMA 2023.
It has been slightly altered and is only used to test our solver implementation with Benchmarks from ICCMA 2023
!!!
!!! IMPORTANT!!!
!!!
"""
# SAT-based verifier for ICCMA solver output: given a task name such as
# "SE-PR", an AF instance file, and the solver's output file, it checks
# the reported answer/witness against a CNF encoding of the semantics.
import sys
from pysat.formula import CNF
from pysat.formula import IDPool
from pysat.solvers import Glucose4 as Solver
# CLI: verify.py <TASK> <af-file> <out-file>
task = sys.argv[1]
af_file = sys.argv[2]
out_file = sys.argv[3]
# if timeout or memout there is nothing to verify
# contents = open(out_file.replace(".out", ".var")).read().split("\n")
# contents = [line for line in contents if not line.startswith("#") and len(line) > 0]
# assert(contents[-3].startswith("TIMEOUT=") and contents[-2].startswith("MEMOUT="))
# contents = [line.split("=")[1] for line in contents]
# if contents[-3] == "true" or contents[-2] == "true":
# print("PASS (timeout)")
# sys.exit(0)
# read the output file and extract potential witness
out_file_contents = open(out_file).read().split("\n")
out_file_contents = [line.strip() for line in out_file_contents if len(line) > 0]
witness = None
if any(line.startswith("w") for line in out_file_contents):
    # exactly one witness line of the form "w 1 3 4" is expected
    witness_lines = [line for line in out_file_contents if line.startswith("w")]
    assert(len(witness_lines) == 1)
    witness = list(map(int, witness_lines[0].replace("w", "").strip().split()))
answer = "NA"
if "YES" in out_file_contents:
    answer = "YES"
if "NO" in out_file_contents:
    answer = "NO"
# task names follow the "<problem>-<semantics>" convention, e.g. "DC-CO"
problem, semantics = task.split("-")
# witness must exist in the following cases
if problem == "SE":
    if semantics != "ST" or answer != "NO":
        assert(witness is not None)
    else:
        print("PASS (answer is NO)")
        sys.exit(0)
if problem == "DC":
    if answer == "YES":
        assert(witness is not None)
    else:
        print("PASS (answer is NO)")
        sys.exit(0)
if problem == "DS":
    if answer == "NO":
        assert(witness is not None)
    else:
        print("PASS (answer is YES)")
        sys.exit(0)
# check that query is in witness
if problem == "DC":
    # the query argument is stored next to the instance in "<af-file>.arg"
    query = int(open(af_file + ".arg").read().strip())
    assert(query in witness)
# or that query is not in witness
if problem == "DS":
    query = int(open(af_file + ".arg").read().strip())
    assert(query not in witness)
# read the original af
af_file_contents = open(af_file).read().split("\n")
af_file_contents = [line.strip() for line in af_file_contents if not line.startswith("#") and len(line) > 0]
# header "p af <n>" declares arguments 1..n; remaining lines are attacks
p_line = af_file_contents[0]
attack_lines = af_file_contents[1:]
arguments = range(1, int(p_line.replace("p af ", ""))+1)
n = len(arguments)
attacks = [tuple(map(int, line.split())) for line in attack_lines]
# attackers[b] = all arguments that attack b
attackers = { a : [] for a in arguments }
for a,b in attacks:
    attackers[b].append(a)
# SAT variable scheme: variable a     = "argument a is in the extension",
#                      variable n+a   = "a is attacked by the extension".
# (variables 2*n+a for the range are introduced later for SST/STG)
witness = set(witness)
in_witness = sorted([a for a in witness])
out_witness = sorted([a for a in arguments if a not in witness])
assumptions = in_witness + [-a for a in out_witness]
def cf_encoding(solver):
    """Conflict-freeness: for every attack, not both endpoints are in."""
    for attacker, target in attacks:
        solver.add_clause([-attacker, -target])
def out_encoding(solver):
    """Define the 'attacked' variables: n+a holds iff some attacker of a is in."""
    for arg in arguments:
        defining_clause = [-(n + arg)]
        for attacker in attackers[arg]:
            # an in-attacker forces the attacked variable of its target
            solver.add_clause([-attacker, n + arg])
            defining_clause.append(attacker)
        # conversely, the attacked variable requires some attacker to be in
        solver.add_clause(defining_clause)
def adm_encoding(solver):
    """Admissibility: conflict-free, and every in-argument is defended."""
    cf_encoding(solver)
    out_encoding(solver)
    for arg in arguments:
        for attacker in attackers[arg]:
            # if arg is in, each of its attackers must itself be attacked
            solver.add_clause([-arg, n + attacker])
def com_encoding(solver):
    """Completeness: admissible, and every fully defended argument is in."""
    adm_encoding(solver)
    for arg in arguments:
        # if all attackers of arg are attacked, arg must be in
        clause = [arg]
        clause.extend(-(n + attacker) for attacker in attackers[arg])
        solver.add_clause(clause)
def stb_encoding(solver):
    """Stability: conflict-free, and every argument outside is attacked."""
    cf_encoding(solver)
    out_encoding(solver)
    for arg in arguments:
        # arg is either in the extension or attacked by it
        solver.add_clause([arg, n + arg])
# verify witness
solver = Solver(with_proof=True)
if semantics == "CO":
    com_encoding(solver)
    assert(solver.solve(assumptions))
elif semantics == "ST":
    stb_encoding(solver)
    assert(solver.solve(assumptions))
elif semantics == "PR":
    # witness is a complete extension
    com_encoding(solver)
    assert(solver.solve(assumptions))
    # no superset is complete
    for a in in_witness:
        solver.add_clause([a])
    # note: if out_witness is empty this adds the empty clause, making the
    # formula UNSAT, which correctly accepts the all-arguments witness
    solver.add_clause([a for a in out_witness])
    assert(not solver.solve())
elif semantics == "SST":
    # witness is a complete extension
    com_encoding(solver)
    # range variables: 2*n+a <-> (a in extension) or (a attacked), via 3 clauses
    for a in arguments:
        solver.add_clause([-(2*n+a), a, n+a])
        solver.add_clause([2*n+a, -a])
        solver.add_clause([2*n+a, -(n+a)])
    assert(solver.solve(assumptions))
    # extract range of witness
    # pysat models are indexed by variable-1; the sign gives the polarity
    model = solver.get_model()
    in_range = [a for a in arguments if model[2*n+a-1] > 0]
    out_range = [a for a in arguments if model[2*n+a-1] < 0]
    # no range-superset is complete
    for a in in_range:
        solver.add_clause([2*n+a])
    solver.add_clause([2*n+a for a in out_range])
    assert(not solver.solve())
elif semantics == "STG":
    # witness is a conflict-free extension
    cf_encoding(solver)
    out_encoding(solver)
    # same range encoding as for SST, but over conflict-free sets
    for a in arguments:
        solver.add_clause([-(2*n+a), a, n+a])
        solver.add_clause([2*n+a, -a])
        solver.add_clause([2*n+a, -(n+a)])
    assert(solver.solve(assumptions))
    # extract range of witness
    model = solver.get_model()
    in_range = [a for a in arguments if model[2*n+a-1] > 0]
    out_range = [a for a in arguments if model[2*n+a-1] < 0]
    # no range-superset is conflict-free
    for a in in_range:
        solver.add_clause([2*n+a])
    solver.add_clause([2*n+a for a in out_range])
    assert(not solver.solve())
elif semantics == "ID":
    # NOTE(review): ideal-semantics witnesses are accepted without any check
    pass
else:
    # unknown semantics: signal failure to the caller
    sys.exit(1)
print("PASS (all tests passed)")
if semantics == "PR" or semantics == "SST" or semantics == "STG":
    # dump the DRUP proof of the final UNSAT (maximality) check
    proof_file = open(out_file.replace(".out", ".drup"), "w")
    for line in solver.get_proof():
        proof_file.write(line + "\n")
    proof_file.close()

213
iccma-2025-bin/src/main.rs Normal file
View File

@ -0,0 +1,213 @@
use adf_bdd::adf::Adf;
use adf_bdd::parser::{AdfParser, Formula};
use clap::{builder, Parser, ValueEnum};
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::BufRead;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
/// ICCMA tasks supported by this binary (selected via `-p`).
///
/// Names follow the competition convention `<problem>-<semantics>`:
/// `DC` = credulous decision, `DS` = skeptical decision, `SE` = find one
/// extension; `CO`/`ST`/`SST`/`PR`/`ID` select the semantics.
#[derive(ValueEnum, Clone)]
enum Task {
    #[value(name = "DC-CO")]
    DcCo,
    #[value(name = "DC-ST")]
    DcSt,
    #[value(name = "DC-SST")]
    DcSst,
    #[value(name = "DS-PR")]
    DsPr,
    #[value(name = "DS-ST")]
    DsSt,
    #[value(name = "DS-SST")]
    DsSst,
    #[value(name = "SE-PR")]
    SePr,
    #[value(name = "SE-ST")]
    SeSt,
    #[value(name = "SE-SST")]
    SeSst,
    #[value(name = "SE-ID")]
    SeId,
}
/// Command-line interface of the ICCMA 2025 solver binary.
#[derive(Parser)]
#[command(
    author,
    version,
    arg_required_else_help(true),
    help_template("{name} {version}\n{author-with-newline}")
)]
struct App {
    /// Print the list of supported tasks and exit; excludes all other flags.
    #[arg(long, exclusive(true))]
    problems: bool,
    /// Task to solve; required unless --problems is given.
    #[arg(short = 'p', value_enum, required_unless_present("problems"))]
    task: Option<Task>,
    /// Input AF instance in the ICCMA "p af <n>" format.
    #[arg(short = 'f', value_parser, required_unless_present("problems"))]
    input_file: Option<PathBuf>,
    /// Query argument (1-indexed); required for all DC-* and DS-* tasks.
    #[arg(short = 'a', required_if_eq_any([("task", "DC-CO"), ("task", "DC-ST"), ("task", "DC-SST"), ("task", "DS-PR"), ("task", "DS-ST"), ("task", "DS-SST")]))]
    query: Option<usize>,
}
struct AF(Vec<Vec<usize>>);
impl From<AF> for AdfParser {
    /// Builds parser state encoding the AF as an ADF: the acceptance
    /// condition of each argument is the conjunction of the negations of
    /// all of its attackers.
    fn from(source: AF) -> Self {
        // statement names are the 1-indexed argument numbers as strings
        let names: Vec<String> = (0..source.0.len())
            .map(|val| (val + 1).to_string())
            .collect();
        // reverse lookup: name -> 0-indexed statement position
        let dict: HashMap<String, usize> = names
            .iter()
            .enumerate()
            .map(|(i, val)| (val.clone(), i))
            .collect();
        let formulae: Vec<Formula> = source
            .0
            .into_iter()
            .map(|attackers| {
                attackers.into_iter().fold(
                    // Top is the empty conjunction: an unattacked argument
                    // gets the always-true acceptance condition, matching
                    // the usual AF-to-ADF translation.
                    Formula::Top,
                    |acc, attacker| {
                        Formula::And(
                            Box::new(acc),
                            Box::new(Formula::Not(Box::new(Formula::Atom(
                                (attacker + 1).to_string(),
                            )))),
                        )
                    },
                )
            })
            .collect();
        // each formula is labelled with the statement it belongs to
        let formulanames = names.clone();
        Self {
            namelist: Arc::new(RwLock::new(names)),
            dict: Arc::new(RwLock::new(dict)),
            formulae: RefCell::new(formulae),
            formulaname: RefCell::new(formulanames),
        }
    }
}
fn main() {
    let app = App::parse();
    if app.problems {
        // --problems: print the supported task names as "[A,B,...]" and exit
        let possible_values: Vec<String> = Task::value_variants()
            .into_iter()
            .filter_map(Task::to_possible_value)
            .map(|pv| builder::PossibleValue::get_name(&pv).to_string())
            .collect();
        print!("[");
        print!("{}", possible_values.join(","));
        println!("]")
    } else {
        let task = app
            .task
            .expect("Task is required when \"problems\" flag is false.");
        let file = app
            .input_file
            .expect("File is required when \"problems\" flag is false.");
        let file = std::fs::File::open(file).expect("Error Reading File");
        let mut lines = std::io::BufReader::new(file).lines();
        // header line: "p af <n>" with n = number of arguments
        let first_line = lines
            .next()
            .expect("There must be at least one line in the file")
            .expect("Error Reading Line");
        let first_line: Vec<_> = first_line.split(" ").collect();
        if first_line[0] != "p" || first_line[1] != "af" {
            panic!("Expected first line to be of the form: p af <n>");
        }
        let num_arguments: usize = first_line[2].parse().expect("Could not convert number of arguments to u32; expected first line to be of the form: p af <n>");
        // remaining non-comment, non-empty lines: one attack "a b" (1-indexed)
        let attacks: Vec<(usize, usize)> = lines
            .map(|line| line.expect("Error Reading Line"))
            .filter(|line| !line.starts_with('#') && !line.is_empty())
            .map(|line| {
                // the closure returns Option, so `?` rejects malformed lines
                let mut line = line.split(" ");
                let a = line.next()?;
                let b = line.next()?;
                if line.next().is_some() {
                    // more than two tokens -> malformed attack line
                    None
                } else {
                    Some((a.parse().ok()?, b.parse().ok()?))
                }
            })
            .map(|res_option| res_option.expect("Line must be of the form: n m"))
            .collect();
        // index in outer vector represents attacked element
        let mut is_attacked_by: Vec<Vec<usize>> = vec![vec![]; num_arguments.try_into().unwrap()];
        for (a, b) in attacks {
            is_attacked_by[b - 1].push(a - 1); // we normalize names to be zero-indexed
        }
        // encode the AF as an ADF (see `From<AF> for AdfParser`)
        let hacked_adf_parser = AdfParser::from(AF(is_attacked_by));
        let mut adf = Adf::from_parser(&hacked_adf_parser);
        match task {
            Task::DcCo => {
                unimplemented!()
                //let query: usize = app.query.expect("Query is required for current task.");
                //let printer = adf.print_dictionary();
                //let mut models = adf.complete();
                //if let Some(fst) = models.filter(|m| {
                //    // TODO: filter for query here; use dict somehow
                //}).next() {
                //    println!("YES");
                //    println!("w {}", printer.print_truthy_statements(&fst).join(" "));
                //}
                //else {
                //    println!("NO");
                //}
            }
            Task::DcSt => {
                unimplemented!()
            }
            Task::DcSst => {
                unimplemented!()
            }
            Task::DsPr => {
                unimplemented!()
            }
            Task::DsSt => {
                unimplemented!()
            }
            Task::DsSst => {
                unimplemented!()
            }
            Task::SePr => {
                // report one preferred model (or NO if none exists)
                let printer = adf.print_dictionary();
                let models = adf.preferred();
                if let Some(fst) = models.first() {
                    println!("w {}", printer.print_truthy_statements(&fst).join(" "));
                } else {
                    println!("NO");
                }
            }
            Task::SeSt => {
                // report the first stable model (or NO if none exists)
                let printer = adf.print_dictionary();
                let mut models = adf.stable();
                if let Some(fst) = models.next() {
                    println!("w {}", printer.print_truthy_statements(&fst).join(" "));
                } else {
                    println!("NO");
                }
            }
            Task::SeSst => {
                // NOTE(review): semi-stable is generally NOT the grounded
                // interpretation — this looks like a placeholder; verify
                // before relying on SE-SST results.
                let printer = adf.print_dictionary();
                let model = adf.grounded();
                println!("w {}", printer.print_truthy_statements(&model).join(" "));
            }
            Task::SeId => {
                unimplemented!()
            }
        }
    }
}

View File

@ -698,6 +698,29 @@ impl Adf {
})
}
/// Computes the preferred models.
///
/// A complete model is preferred iff it is maximal in the information
/// ordering: no other complete model agrees with it on every statement it
/// decides while deciding strictly more statements. (The previous
/// implementation compared only the *count* of statements decided true,
/// which ignores statements decided false and is not the subset-based
/// maximality that defines preferred semantics.)
///
/// Returns a Vector which contains all preferred models.
pub fn preferred(&mut self) -> Vec<Vec<Term>> {
    let models: Vec<Vec<Term>> = self.complete().collect();
    // Per model: Some(truth value) for decided statements, None otherwise.
    let decided: Vec<Vec<Option<bool>>> = models
        .iter()
        .map(|model| {
            model
                .iter()
                .map(|t| {
                    if t.is_truth_value() {
                        Some(t.is_true())
                    } else {
                        None
                    }
                })
                .collect()
        })
        .collect();
    let decided_count = |i: usize| decided[i].iter().filter(|d| d.is_some()).count();
    // Keep model i unless some other model strictly extends it: it agrees
    // on all of i's decided statements and decides strictly more.
    let keep: Vec<bool> = (0..models.len())
        .map(|i| {
            !(0..models.len()).any(|j| {
                j != i
                    && decided_count(j) > decided_count(i)
                    && decided[i]
                        .iter()
                        .zip(decided[j].iter())
                        .all(|(a, b)| a.is_none() || a == b)
            })
        })
        .collect();
    models
        .into_iter()
        .zip(keep)
        .filter(|(_, keep_model)| *keep_model)
        .map(|(model, _)| model)
        .collect()
}
/// Returns a [Vector][std::vec::Vec] of [ModelCounts][crate::datatypes::ModelCounts] for each acceptance condition.
///
/// `memoization` controls whether memoization is utilised or not.

View File

@ -89,6 +89,20 @@ impl PrintDictionary {
{
PrintableInterpretation::new(interpretation, &self.ordering)
}
/// Collects the names of all statements assigned true in `interpretation`.
pub fn print_truthy_statements(&self, interpretation: &[Term]) -> Vec<String> {
    let mut truthy = Vec::new();
    for (pos, term) in interpretation.iter().enumerate() {
        // skip undecided statements and statements decided false
        if term.is_truth_value() && term.is_true() {
            let name = self
                .ordering
                .name(Var(pos))
                .expect("Variable originates from same parser object as the ordering");
            truthy.push(name);
        }
    }
    truthy
}
}
/// A struct to print a representation, it will be instantiated by [Adf][crate::adf::Adf] by calling the method [print_interpretation][`crate::adf::Adf::print_interpretation`].

View File

@ -20,28 +20,28 @@ use crate::datatypes::adf::VarContainer;
/// A representation of a formula, still using the strings from the input.
#[derive(Clone, PartialEq, Eq)]
pub enum Formula<'a> {
pub enum Formula {
/// `c(f)` in the input format.
Bot,
/// `c(v)` in the input format.
Top,
/// Some atomic variable in the input format.
Atom(&'a str),
Atom(String),
/// Negation of a subformula.
Not(Box<Formula<'a>>),
Not(Box<Formula>),
/// Conjunction of two subformulae.
And(Box<Formula<'a>>, Box<Formula<'a>>),
And(Box<Formula>, Box<Formula>),
/// Disjunction of two subformulae.
Or(Box<Formula<'a>>, Box<Formula<'a>>),
Or(Box<Formula>, Box<Formula>),
/// Implication of two subformulae.
Imp(Box<Formula<'a>>, Box<Formula<'a>>),
Imp(Box<Formula>, Box<Formula>),
/// Exclusive-Or of two subformulae.
Xor(Box<Formula<'a>>, Box<Formula<'a>>),
Xor(Box<Formula>, Box<Formula>),
/// If and only if connective between two formulae.
Iff(Box<Formula<'a>>, Box<Formula<'a>>),
Iff(Box<Formula>, Box<Formula>),
}
impl Formula<'_> {
impl Formula {
pub(crate) fn to_boolean_expr(
&self,
) -> biodivine_lib_bdd::boolean_expression::BooleanExpression {
@ -90,7 +90,7 @@ impl Formula<'_> {
}
}
impl std::fmt::Debug for Formula<'_> {
impl std::fmt::Debug for Formula {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Formula::Atom(a) => {
@ -132,14 +132,14 @@ impl std::fmt::Debug for Formula<'_> {
///
/// Note that the parser can be utilised by an [ADF][`crate::adf::Adf`] to initialise it with minimal overhead.
#[derive(Debug)]
pub struct AdfParser<'a> {
namelist: Arc<RwLock<Vec<String>>>,
dict: Arc<RwLock<HashMap<String, usize>>>,
formulae: RefCell<Vec<Formula<'a>>>,
formulaname: RefCell<Vec<String>>,
pub struct AdfParser {
pub namelist: Arc<RwLock<Vec<String>>>,
pub dict: Arc<RwLock<HashMap<String, usize>>>,
pub formulae: RefCell<Vec<Formula>>,
pub formulaname: RefCell<Vec<String>>,
}
impl Default for AdfParser<'_> {
impl Default for AdfParser {
fn default() -> Self {
AdfParser {
namelist: Arc::new(RwLock::new(Vec::new())),
@ -150,10 +150,7 @@ impl Default for AdfParser<'_> {
}
}
impl<'a, 'b> AdfParser<'b>
where
'a: 'b,
{
impl<'a> AdfParser {
#[allow(dead_code)]
fn parse_statements(&'a self) -> impl FnMut(&'a str) -> IResult<&'a str, ()> {
move |input| {
@ -212,9 +209,9 @@ where
}
}
impl<'a> AdfParser<'a> {
impl AdfParser {
/// Creates a new parser, utilising the already existing [VarContainer]
pub fn with_var_container(var_container: VarContainer) -> AdfParser<'a> {
pub fn with_var_container(var_container: VarContainer) -> AdfParser {
AdfParser {
namelist: var_container.names(),
dict: var_container.mappings(),
@ -224,7 +221,7 @@ impl<'a> AdfParser<'a> {
}
}
impl AdfParser<'_> {
impl AdfParser {
/// after an update to the namelist, all indizes are updated
fn regenerate_indizes(&self) {
self.namelist
@ -284,7 +281,7 @@ impl AdfParser<'_> {
}
fn atomic_term(input: &str) -> IResult<&str, Formula> {
AdfParser::atomic(input).map(|(input, result)| (input, Formula::Atom(result)))
AdfParser::atomic(input).map(|(input, result)| (input, Formula::Atom(result.to_string())))
}
fn formula(input: &str) -> IResult<&str, Formula> {
@ -504,7 +501,10 @@ mod test {
assert_eq!(parser.dict_value("b"), Some(2usize));
assert_eq!(
format!("{:?}", parser.ac_at(1).unwrap()),
format!("{:?}", Formula::Not(Box::new(Formula::Atom("a"))))
format!(
"{:?}",
Formula::Not(Box::new(Formula::Atom("a".to_string())))
)
);
assert_eq!(parser.formula_count(), 3);
assert_eq!(parser.formula_order(), vec![0, 2, 1]);