From 72387c8b864dc8f9877bb5a2e8c1339fa8d47a94 Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Sat, 13 Jan 2024 16:18:35 -0800 Subject: [PATCH 01/15] [WIP] Implement SAS+ operator parsing --- oo_scoping/sas/nbs/parse_sas.ipynb | 77 ++++++++++++++++++------------ 1 file changed, 46 insertions(+), 31 deletions(-) diff --git a/oo_scoping/sas/nbs/parse_sas.ipynb b/oo_scoping/sas/nbs/parse_sas.ipynb index e4e746d..b858b5f 100644 --- a/oo_scoping/sas/nbs/parse_sas.ipynb +++ b/oo_scoping/sas/nbs/parse_sas.ipynb @@ -262,7 +262,7 @@ " self.parse_axioms()\n", " self.parse_operators()\n", "\n", - " \n", + "\n", " def parse_version(self) -> str:\n", " pattern_version = \"begin_version\\n(?P\\d+)\\nend_version\"\n", " versions = re.findall(pattern_version, self.s_sas)\n", @@ -378,7 +378,7 @@ " raise ValueError(f\"The sas file claims to have {n_operators} operators, but we found {len(operators)}\")\n", " self.operators = tuple(operators)\n", " return self.operators\n", - " \n", + "\n", " def parse_effect(self, s: str) -> SasEffect:\n", " # n_cond = int(re.match(\"(?P\\d+).+\", s).group(\"n_cond\"))\n", " s_split = s.split(\" \")\n", @@ -411,10 +411,10 @@ " affected_var_condition=val_cond,\n", " result_val=num_val_result\n", " )\n", - " \n", "\n", "\n", - " \n", + "\n", + "\n", " # Helper functions\n", " ## Getting SasVarValPairs\n", " def get_sas_var_val_pair_from_ints(\n", @@ -460,7 +460,7 @@ " ]\n", " return \"\\n\".join(pieces) + \"\\n\"\n", "\n", - " \n", + "\n", " def generate_version_and_metric_sections(self) -> str:\n", " return f\"begin_version\\n{self.version}\\nend_version\\nbegin_metric\\n{self.metric}\\nend_metric\"\n", "\n", @@ -529,9 +529,9 @@ " ]\n", " # Add prevail conditions\n", " pieces.extend([self.var_val_pair2sas_str(p) for p in o.prevail])\n", - " \n", + "\n", " # Add effects\n", - " pieces.append(str(len(o.effects))) \n", + " pieces.append(str(len(o.effects)))\n", " pieces.extend([self.effect2sas_str(e) for e in o.effects])\n", "\n", " # Add cost\n", @@ -572,7 +572,7 @@ " pieces.append(f\"{i_var} {a.affected_var_condition} {a.result_val}\")\n", " pieces.append(\"end_rule\")\n", " return \"\\n\".join(pieces)\n", - " \n", + "\n", " # Check parse\n", " def check_parse(self) -> bool:\n", " return self.generate_sas() == self.s_sas\n", @@ -593,28 +593,43 @@ " def varvalpair2condition(self, p: SasVarValPair) -> z3.BoolRef:\n", " return self.z3var(p.var) == p.val\n", "\n", - " def sasoperator2skills(self, a: SasOperator) -> Tuple[SkillPDDL,...]:\n", - " \"\"\"\n", - " Partition effects by matching effectconditions*\n", - " Return a skill for each of these combinations\n", - " We can do this partitioning pre-z3\n", - "\n", - " *Not quite. What we actually need is:\n", - " 1. Partition by condition\n", - " 2. Look at the powerset of this partition\n", - " 3. Discard impossible combinations\n", - " 4. Return a skill for each remaining combo\n", - "\n", - " Alternatively:\n", - "\n", - " 1. Powerset of effects\n", - " 2. Discard impossible sets. 
If c0 => c1, then we can't have (c0, ~c1)\n", - "\n", - "\n", - " We also need to take into account axioms.\n", - " \"\"\"\n", - " \n", - " raise NotImplementedError\n" + " def sasoperator2skills(self, a: SasOperator) -> SkillPDDL:\n", + " # Start with prevail conditions and effect preconditions\n", + " preconditions = list(a.prevail)\n", + " for effect in a.effects:\n", + " if effect.affected_var_condition is not None and effect.affected_var_condition != -1:\n", + " preconditions.append(effect.affected_var_condition_pair)\n", + "\n", + " # Add mutex-derived preconditions\n", + " for mutex in self.sas_mutexes:\n", + " for pair in mutex.facts:\n", + " if pair in preconditions:\n", + " # For each other pair in the mutex group, add a precondition that it's not true\n", + " other_pairs = [p for p in mutex.facts if p != pair]\n", + " for other_pair in other_pairs:\n", + " # Here we add a precondition to ensure the other pairs are not true\n", + " preconditions.append(self.negate_condition(other_pair))\n", + "\n", + " # Convert preconditions to z3.ExprRef\n", + " precondition_expr = z3.And([z3.Bool(f\"{p.var.nm} == {p.val_nm}\") for p in preconditions])\n", + "\n", + " # Convert effects to EffectTypePDDL\n", + " effects = []\n", + " for effect in a.effects:\n", + " affected_var_str = effect.result_var_val_pair.var.nm\n", + " index = effect.result_var_val_pair.val # The index in the SasVar.vals list\n", + " pvar = z3.Bool(affected_var_str)\n", + " effect_pddl = EffectTypePDDL(pvar=pvar, index=index)\n", + " effects.append(effect_pddl)\n", + "\n", + " # Create the SkillPDDL\n", + " action_name = a.nm\n", + " skill = SkillPDDL(precondition=precondition_expr, action=action_name, effects=effects)\n", + "\n", + " return skill\n", + "\n", + " def negate_condition(self, pair: SasVarValPair) -> z3.ExprRef:\n", + " return z3.Not(z3.Bool(f\"{pair.var.nm} == {pair.val_nm}\"))\n" ] }, { @@ -794,7 +809,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.7" + "version": "3.9.13" }, "orig_nbformat": 4, "vscode": { From 8d4ba21db9afccafdad62446e03d286bad493d1c Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Thu, 22 Dec 2022 19:45:08 -0500 Subject: [PATCH 02/15] [WIP] begin adding sas parser --- oo_scoping/downward_translate/sas_tasks.py | 23 +++++++++++++++++++ .../downward_translate/translate_and_scope.py | 13 +++++++++-- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/oo_scoping/downward_translate/sas_tasks.py b/oo_scoping/downward_translate/sas_tasks.py index 619851b..dbd93b2 100644 --- a/oo_scoping/downward_translate/sas_tasks.py +++ b/oo_scoping/downward_translate/sas_tasks.py @@ -94,6 +94,20 @@ def output(self, stream): for axiom in self.axioms: axiom.output(stream) + def load(self, filename='output.sas'): + with open(filename, 'r') as stream: + lines = stream.readlines() + + assert lines.pop(0) == "begin_version" + assert int(lines.pop(0)) == SAS_FILE_VERSION + assert lines.pop(0) == "end_version" + assert lines.pop(0) == "begin_metric" + self.metric = int(lines.pop(0)) + assert lines.pop(0) == "end_metric" + self.variables = SASVariables().parse(lines) + n_mutexes = lines.pop(0) + + def get_encoding_size(self): task_size = 0 task_size += self.variables.get_encoding_size() @@ -167,6 +181,15 @@ def output(self, stream): print(value, file=stream) print("end_variable", file=stream) + def parse(self, lines): + n_vars = int(lines.pop(0)) + self.ranges = [] + self.axiom_layers = [] + self.value_names = [] + for i in range(n_vars): + assert 
lines.pop(0) == "begin_variable" + pass + def get_encoding_size(self): # A variable with range k has encoding size k + 1 to also give the # variable itself some weight. diff --git a/oo_scoping/downward_translate/translate_and_scope.py b/oo_scoping/downward_translate/translate_and_scope.py index 33289ef..89a9453 100755 --- a/oo_scoping/downward_translate/translate_and_scope.py +++ b/oo_scoping/downward_translate/translate_and_scope.py @@ -782,6 +782,15 @@ def main(): with open(options.sas_file, "w") as output_file: sas_task.output(output_file) + scope_sas(sas_task) + +def scope_sas(sas_task=None): + if sas_task is None: + with timers.timing("Writing output SAS file"): + with open(options.sas_file, "r") as input_file: + sas_task : .output(input_file) + + timer = timers.Timer() if options.scope: # This below block of code performs task scoping on the SAS+ domain. str2var_dict = scoping_sas_parser.make_str2var_dict(sas_task.variables) @@ -817,7 +826,7 @@ def main(): # These are used to estimate size of effective state space of scoped domain. # We do this because we do not in fact remove fluents from the sas+ domain, we only remove operators def get_effectively_relevant_pvars( - cae_triples: Iterable[SkillPDDL], action_names=None + cae_triples: Iterable[SkillPDDL], action_names = None ): """ :param cae_triples: operators from original domain @@ -914,7 +923,7 @@ def main_from_other_script(**kwargs): # Reserve about 10 MB of emergency memory. # https://stackoverflow.com/questions/19469608/ emergency_memory = b"x" * 10**7 - main() + scope_sas() # main() except MemoryError: From 7b6c56ede6d9840ccde7cbc08963f8df75441e77 Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Tue, 16 Jan 2024 22:04:09 -0800 Subject: [PATCH 03/15] Cleanup notebook --- oo_scoping/sas/nbs/parse_sas.ipynb | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/oo_scoping/sas/nbs/parse_sas.ipynb b/oo_scoping/sas/nbs/parse_sas.ipynb index b858b5f..a6f2406 100644 --- a/oo_scoping/sas/nbs/parse_sas.ipynb +++ b/oo_scoping/sas/nbs/parse_sas.ipynb @@ -211,6 +211,9 @@ "# |export\n", "\n", "\n", + "from oo_scoping.downward_translate import sas_tasks as fd\n", + "\n", + "\n", "class SasParser:\n", " \"\"\"\n", " Parse sas planning files into python datastructures.\n", @@ -593,7 +596,7 @@ " def varvalpair2condition(self, p: SasVarValPair) -> z3.BoolRef:\n", " return self.z3var(p.var) == p.val\n", "\n", - " def sasoperator2skills(self, a: SasOperator) -> SkillPDDL:\n", + " def sasoperator2skill(self, a: SasOperator) -> SkillPDDL:\n", " # Start with prevail conditions and effect preconditions\n", " preconditions = list(a.prevail)\n", " for effect in a.effects:\n", @@ -611,7 +614,7 @@ " preconditions.append(self.negate_condition(other_pair))\n", "\n", " # Convert preconditions to z3.ExprRef\n", - " precondition_expr = z3.And([z3.Bool(f\"{p.var.nm} == {p.val_nm}\") for p in preconditions])\n", + " precondition_expr = z3.And(*[self.varvalpair2condition(p) for p in preconditions])\n", "\n", " # Convert effects to EffectTypePDDL\n", " effects = []\n", @@ -629,7 +632,10 @@ " return skill\n", "\n", " def negate_condition(self, pair: SasVarValPair) -> z3.ExprRef:\n", - " return z3.Not(z3.Bool(f\"{pair.var.nm} == {pair.val_nm}\"))\n" + " return z3.Not(self.varvalpair2condition(pair))\n", + "\n", + " def scope(self):\n", + " pass\n" ] }, { From cc7635e29d79e0d9877ec519a18211aab41e324c Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Tue, 16 Jan 2024 22:04:34 -0800 Subject: [PATCH 04/15] Export sas_parser from notebook to pure 
python --- oo_scoping/sas_parser.py | 553 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 553 insertions(+) create mode 100644 oo_scoping/sas_parser.py diff --git a/oo_scoping/sas_parser.py b/oo_scoping/sas_parser.py new file mode 100644 index 0000000..bd67f16 --- /dev/null +++ b/oo_scoping/sas_parser.py @@ -0,0 +1,553 @@ +from __future__ import annotations +import re, os +from dataclasses import dataclass +from typing import Dict, Tuple, List, Set, NewType, Optional, Iterable, TypeVar, Set +import itertools + +import z3 + +from oo_scoping.skill_classes import SkillPDDL, EffectTypePDDL + +SasVarVal = NewType("SasVarVal", str) + +@dataclass(frozen=True, order=True) +class SasVar: + nm: str + axiom_layer: int + range: int + vals: Tuple[SasVarVal,...] + + @staticmethod + def from_regex_tuple(m: Tuple[str, str, str, str]) -> SasVar: + return SasVar(m[0], int(m[1]), int(m[2]), SasVar.split_values(m[3])) + + @staticmethod + def split_values(s: str) -> Tuple[SasVarVal, ...]: + return tuple([SasVarVal(x) for x in s.split("\n")]) + + def get_var_val_pair(self, i: int) -> SasVarValPair: + return SasVarValPair(self, i) + + +@dataclass(frozen=True, order=True) +class SasVarValPair: + var: SasVar + # val: SasVarVal + val: int + + @property + def val_nm(self) -> SasVarVal: + return self.var.vals[self.val] + +@dataclass(frozen=True, order=True) +class SasEffect: + """ + Sas files distinguish between the condition on non-affected vars, + and the condition on the affected var. + Note that the var in condition_affected_var + and the var in result must be the same + It is maybe wasteful to keep it separate + """ + condition: Tuple[SasVarValPair,...] + affected_var: SasVar + affected_var_condition: Optional[int] + result_val: int + + @property + def affected_var_condition_pair(self) -> Optional[SasVarValPair]: + if self.affected_var_condition is None: + return None + else: + return SasVarValPair(self.affected_var, self.affected_var_condition) + + @property + def result_var_val_pair(self) -> SasVarValPair: + return SasVarValPair(self.affected_var, self.result_val) + + @property + def full_condition(self) -> Tuple[SasVarValPair,...]: + """ + Combination of non-affected var condition + and affected var condition + MF: Should we sort by var? Nah. + """ + if self.affected_var_condition is None: + return self.condition + else: + return self.condition + [self.affected_var_condition_pair] + + +@dataclass(frozen=True, order=True) +class SasOperator: + nm: str + prevail: Tuple[SasVarValPair,...] + effects: Tuple[SasEffect,...] + cost: int = 1 #Default to 1, in case we don't use action cost + + +"""An axiom is basically an effect that is applied every timestep, if applicable""" +class SasAxiom(SasEffect): + pass + +@dataclass(frozen=True, order=True) +class SasMutex: + facts: Tuple[SasVarValPair,...] + + +@dataclass(frozen=True, order=True) +class SasPartialState: + """It would be nice to enforce uniqueness of keys""" + var_value_pairs: Tuple[SasVarValPair,...] + + def __getitem__(self, key: SasVar) -> SasVarVal: + candidates = [x.val for x in self.var_value_pairs if x.var == key] + return candidates[0] + + +class SasState(SasPartialState): + """It would be nice to enforce full specification of vars""" + pass + + +class SasParser: + """ + Parse sas planning files into python datastructures. + There are three sets of methods: + + 1. Parsing functions. One for each section, and one to string them together. + 2. Helper functions + 3. 
File writing functions
+    """
+
+    # Regex patterns used in parsing
+    pattern_var_val_pair = "(?P<var_num>\d+) (?P<val_num>\d+)"
+    pattern_operator = "begin_operator\n(?P<nm>.+)\n(?P<n_prevail>.+)\n(?P<prevail>(\d+ \d+\n)*?)(?P<n_effects>\d+)\n(?P<effects>[\s\S]+?)\n(?P<cost>\d+)\nend_operator"
+
+    # Type annotations for parsed values
+    s_sas: str
+    sas_vars: Tuple[SasVar, ...]
+    sas_operators: Tuple[SasOperator, ...]
+    sas_mutexes: Tuple[SasMutex, ...]
+    sas_axioms: Tuple[SasAxiom, ...]
+    initial_state: SasState
+    goal: SasPartialState
+    _z3vars: Dict[SasVar, z3.Int]
+
+    def __init__(self, s_sas: Optional[str] = None, pth: Optional[str] = None) -> None:
+        """Specify either s_sas or pth."""
+        if s_sas is not None:
+            self.s_sas = s_sas
+        elif pth is not None:
+            with open(pth, "r") as f:
+                self.s_sas = f.read()
+        else:
+            raise ValueError(
+                "Please specify either s_sas or the pth of the sas file when creating a SasParser."
+            )
+        self._z3vars: Dict[SasVar, z3.Int] = dict()
+
+    # Parsing functions
+    def parse(self):
+        """Run all parsing steps."""
+        self.parse_version()
+        self.parse_metric()
+        self.parse_vars()
+        self.parse_mutex()
+        self.parse_initial_state()
+        self.parse_goal()
+        self.parse_axioms()
+        self.parse_operators()
+
+    def parse_version(self) -> str:
+        pattern_version = "begin_version\n(?P<version>\d+)\nend_version"
+        versions = re.findall(pattern_version, self.s_sas)
+        if len(versions) != 1:
+            raise ValueError(f"File specifies {len(versions)} versions. It should specify 1.")
+        self.version = versions[0]
+        return self.version
+
+    def parse_metric(self) -> int:
+        """The metric should be 0 or 1"""
+        pattern_metric = "begin_metric\n(?P<metric>\d+)\nend_metric"
+        metrics = re.findall(pattern_metric, self.s_sas)
+        if len(metrics) != 1:
+            raise ValueError(f"File specifies {len(metrics)} metrics. It should specify 1.")
+        self.metric = int(metrics[0])
+        return self.metric
+
+    def parse_vars(self) -> Tuple[SasVar, ...]:
+        # Regex notes: [\s\S] matches all characters, including newlines.
+        # Putting ? after a quantifier makes it non-greedy, so that it stops as soon as it can.
+        var_pattern = "begin_variable\n(?P<nm>.*)\n(?P<axiom_layer>.*)\n(?P<range>.*)\n(?P<vals>[\s\S]*?)\nend_variable"
+        # TODO use finditer instead, so that we can use named capture groups.
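+        # A sketch of the finditer version, using var_pattern's named groups
+        # (nm/axiom_layer/range/vals):
+        #     sas_vars = tuple(
+        #         SasVar(m.group("nm"), int(m.group("axiom_layer")),
+        #                int(m.group("range")), SasVar.split_values(m.group("vals")))
+        #         for m in re.finditer(var_pattern, self.s_sas)
+        #     )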
+        # Less error-prone, more clear
+        matches = re.findall(var_pattern, self.s_sas)
+        sas_vars: List[SasVar] = []
+        for m in matches:
+            sas_vars.append(SasVar.from_regex_tuple(m))
+        sas_vars = tuple(sas_vars)
+        self.sas_vars = sas_vars
+        return sas_vars
+
+    def parse_mutex(self) -> Tuple[SasMutex, ...]:
+        """
+        Must be run after parse_vars
+        """
+        mutex_pattern = (
+            "begin_mutex_group\n(?P<n_facts>\d+)\n(?P<facts>[\s\S]*?)\nend_mutex_group"
+        )
+        mutexes_lst: List[SasMutex] = []
+
+        for mutex_group_match in re.finditer(mutex_pattern, self.s_sas):
+            facts_lst: List[SasVarValPair] = []
+            facts_strs = mutex_group_match.group("facts").splitlines()
+            for fs in facts_strs:
+                fact = self.get_sas_var_val_pair_from_str(fs)
+                facts_lst.append(fact)
+            mutex = SasMutex(tuple(facts_lst))
+            mutexes_lst.append(mutex)
+
+        mutexes = tuple(mutexes_lst)
+        self.mutexes = mutexes
+        return mutexes
+
+    def parse_initial_state(self) -> SasState:
+        pattern_initial_state = "begin_state\n(?P<state>[\s\S]+?)\nend_state"
+        s_state = re.search(pattern_initial_state, self.s_sas).group("state")
+        vals = s_state.splitlines()
+        assert len(vals) == len(self.sas_vars)
+        var_val_pairs = tuple([self.sas_vars[i].get_var_val_pair(int(vals[i])) for i in range(len(vals))])
+        self.initial_state = SasState(var_val_pairs)
+        return self.initial_state
+
+    def parse_goal(self) -> SasPartialState:
+        pattern_goal = "begin_goal\n(?P<n_goals>\d+)\n(?P<var_vals>[\s\S]+?)\nend_goal"
+        s_goal = re.search(pattern_goal, self.s_sas).group("var_vals")
+        var_val_strs = s_goal.splitlines()
+        var_val_pairs = tuple([self.get_sas_var_val_pair_from_str(s) for s in var_val_strs])
+        self.goal = SasPartialState(var_val_pairs)
+        return self.goal
+
+    def parse_axioms(self) -> Tuple[SasAxiom, ...]:
+        pattern_head = "(?P<var_num>\d+) (?P<val_num_old>\d+) (?P<val_num_new>\d+)"
+        pattern_axiom = f"begin_rule\n(?P<n_conds>\d+)\n(?P<conditions>[\s\S]+?)\n{pattern_head}\nend_rule"
+        axioms_lst: List[SasAxiom] = []
+        for m_axiom in re.finditer(pattern_axiom, self.s_sas):
+            conds_strs = m_axiom.group("conditions").splitlines()
+            conds = [self.get_sas_var_val_pair_from_str(c) for c in conds_strs]
+            i_affected_var = int(m_axiom.group("var_num"))
+            i_val_old = int(m_axiom.group("val_num_old"))
+            i_val_new = int(m_axiom.group("val_num_new"))
+            affected_var = self.sas_vars[i_affected_var]
+            axioms_lst.append(
+                SasAxiom(
+                    condition=tuple(conds),
+                    affected_var=affected_var,
+                    affected_var_condition=affected_var.vals[i_val_old],
+                    result_val=affected_var.vals[i_val_new],
+                )
+            )
+        self.axioms = tuple(axioms_lst)
+        return self.axioms
+
+    def parse_operators(self) -> Tuple[SasOperator, ...]:
+        pattern_operator_count = "end_goal\n(?P<n>\d+)\nbegin_operator"
+        n_operators = int(re.search(pattern_operator_count, self.s_sas).group("n"))
+
+        operators: List[SasOperator] = []
+        for m in re.finditer(SasParser.pattern_operator, self.s_sas):
+            prevail_lines = m.group("prevail").splitlines()
+            prevail = tuple([self.get_sas_var_val_pair_from_str(x) for x in prevail_lines])
+            effect_lines = m.group("effects").splitlines()
+            effects = tuple([self.parse_effect(x) for x in effect_lines])
+            operators.append(SasOperator(
+                nm=m.group("nm"),
+                prevail=prevail,
+                effects=effects,
+                cost=int(m.group("cost"))
+            ))
+        if len(operators) != n_operators:
+            raise ValueError(f"The sas file claims to have {n_operators} operators, but we found {len(operators)}")
+        self.operators = tuple(operators)
+        return self.operators
+
+    def parse_effect(self, s: str) -> SasEffect:
+        # n_cond = int(re.match("(?P<n_cond>\d+).+", s).group("n_cond"))
+        s_split = s.split(" ")
+        n_cond = int(s_split[0])
+
+        conds: 
List[SasVarValPair] = [] + i_conds_start = 1 + for i_pair in range(n_cond): + num_var = int(s_split[i_conds_start + i_pair*2]) + num_val = int(s_split[i_conds_start + (i_pair*2) + 1]) + conds.append(self.get_sas_var_val_pair_from_ints(num_var, num_val)) + + i_conds_end = i_conds_start + n_cond * 2 + num_var_affected = int(s_split[i_conds_end]) + var_affected = self.sas_vars[num_var_affected] + num_val_cond = int(s_split[i_conds_end + 1]) + # -1 means that there is no condition on the affected var + if num_val_cond == -1: + val_cond = None + else: + val_cond = var_affected.vals[num_val_cond] + + num_val_result = int(s_split[i_conds_end + 2]) + if i_conds_end + 2 != len(s_split) - 1: + raise ValueError("We miscounted") + + return SasEffect( + condition=tuple(conds), + affected_var=var_affected, + affected_var_condition=val_cond, + result_val=num_val_result + ) + + # Helper functions + ## Getting SasVarValPairs + def get_sas_var_val_pair_from_ints( + self, var_num: int, val_num: int + ) -> SasVarValPair: + var0 = self.sas_vars[var_num] + # val0 = var0.vals[val_num] + return SasVarValPair(var0, val_num) + + @staticmethod + def get_var_val_nums_from_str(s: str) -> Tuple[int, int]: + m = re.match(SasParser.pattern_var_val_pair, s) + if m is None: + raise ValueError(f"The string is not a pair of ints:\n{s}") + var_num = int(m.group("var_num")) + val_num = int(m.group("val_num")) + return var_num, val_num + + def get_sas_var_val_pair_from_str(self, s: str) -> SasVarValPair: + var_num, val_num = SasParser.get_var_val_nums_from_str(s) + return self.get_sas_var_val_pair_from_ints(var_num, val_num) + + + def var_val_pair2ints(self, p: SasVarValPair) -> Tuple[int, int]: + """Returns the pair of ints a sas file would use to represent p""" + i_var = self.sas_vars.index(p.var) + # i_val = p.var.vals.index(p.val) + return (i_var, p.val) + + def var_val_pair2sas_str(self, p: SasVarValPair) -> str: + return " ".join(map(str, self.var_val_pair2ints(p))) + + # Writing back to SAS + def generate_sas(self) -> str: + pieces: List[str] = [ + self.generate_version_and_metric_sections(), + self.generate_variables_section(), + self.generate_mutexes_section(), + self.generate_initial_state_section(), + self.generate_goal_section(), + self.generate_operators_section(), + self.generate_axioms_section() + ] + return "\n".join(pieces) + "\n" + + + def generate_version_and_metric_sections(self) -> str: + return f"begin_version\n{self.version}\nend_version\nbegin_metric\n{self.metric}\nend_metric" + + ## Variables + def generate_variables_section(self) -> str: + pieces: List[str] = [str(len(self.sas_vars))] + for v in self.sas_vars: + pieces.append(self.generate_variable_str(v)) + return "\n".join(pieces) + + def generate_variable_str(self, v: SasVar) -> str: + pieces: List[str] = [ + "begin_variable", + v.nm, + str(v.axiom_layer), + str(v.range) + ] + for val in v.vals: + pieces.append(val) + pieces.append("end_variable") + return "\n".join(pieces) + + ## Mutexes + def generate_mutexes_section(self) -> str: + pieces: List[str] = [str(len(self.mutexes))] + pieces.extend([self.generate_mutex_str(m) for m in self.mutexes]) + return "\n".join(pieces) + + def generate_mutex_str(self, m: SasMutex) -> str: + pieces: List[str] = ["begin_mutex_group"] + pieces.append(str(len(m.facts))) + # raise NotImplementedError + for f in m.facts: + i_var = self.sas_vars.index(f.var) + # i_val = f.var.vals.index(f.val) + pieces.append(f"{i_var} {f.val}") + pieces.append("end_mutex_group") + return "\n".join(pieces) + + ## Initial State + def 
generate_initial_state_section(self) -> str: + pieces: List[str] = ["begin_state"] + # pieces.extend([self.var_val_pair2sas_str(p) for p in self.initial_state.var_value_pairs]) + pieces.extend([str(p.val) for p in self.initial_state.var_value_pairs]) + pieces.append("end_state") + return "\n".join(pieces) + + ## Goals + def generate_goal_section(self) -> str: + pieces: List[str] = ["begin_goal", str(len(self.goal.var_value_pairs))] + pieces.extend([self.var_val_pair2sas_str(p) for p in self.goal.var_value_pairs]) + pieces.append("end_goal") + return "\n".join(pieces) + + def generate_operators_section(self) -> str: + pieces: List[str] = [str(len(self.operators))] + pieces.extend([self.generate_operator_str(o) for o in self.operators]) + return "\n".join(pieces) + + ## Operators + def generate_operator_str(self, o: SasOperator) -> str: + pieces: List[str] = [ + "begin_operator", + o.nm, + str(len(o.prevail)) + ] + # Add prevail conditions + pieces.extend([self.var_val_pair2sas_str(p) for p in o.prevail]) + + # Add effects + pieces.append(str(len(o.effects))) + pieces.extend([self.effect2sas_str(e) for e in o.effects]) + + # Add cost + pieces.append(str(o.cost)) + pieces.append("end_operator") + return "\n".join(pieces) + + + def effect2sas_str(self, e: SasEffect) -> str: + pieces: List[str] = [str(len(e.condition))] + # Effect conditions + pieces.extend([self.var_val_pair2sas_str(p) for p in e.condition]) + # Affected var + pieces.append(str(self.sas_vars.index(e.affected_var))) + # Affected var condition + if e.affected_var_condition is None: + pieces.append("-1") + else: + pieces.append(str(e.affected_var.vals.index(e.affected_var_condition))) + # Result value + pieces.append(str(e.result_val)) + return " ".join(pieces) + + ## Axioms + def generate_axioms_section(self) -> str: + pieces: List[str] = [str(len(self.axioms))] + pieces.extend([self.axiom2sas_str(a) for a in self.axioms]) + return "\n".join(pieces) + + def axiom2sas_str(self, a: SasAxiom) -> str: + pieces: List[str] = ["begin_rule", str(len(a.condition))] + # Conditions + pieces.extend([self.var_val_pair2sas_str(p) for p in a.condition]) + # Affected var + i_var = self.sas_vars.index(a.affected_var) + # i_val_old = a.affected_var.vals.index(a.affected_var_condition) + # i_val_new = a.affected_var.vals.index(a.result_val) + pieces.append(f"{i_var} {a.affected_var_condition} {a.result_val}") + pieces.append("end_rule") + return "\n".join(pieces) + + # Check parse + def check_parse(self) -> bool: + return self.generate_sas() == self.s_sas + + + # Converting to scopeable representation + def z3var(self, v: SasVar) -> z3.Int: + """ + Get the z3 var for a SasVar + """ + if v in self._z3vars.keys(): + self._z3vars[v.nm] = z3.Int(v.nm) + return self._z3vars[v.nm] + + def effect_type(self, p: SasVarValPair) -> EffectTypePDDL: + return EffectTypePDDL(pvar=self.z3var(p.var), index=p.val) + + def varvalpair2condition(self, p: SasVarValPair) -> z3.BoolRef: + return self.z3var(p.var) == p.val + + def sasoperator2skill(self, a: SasOperator) -> SkillPDDL: + # Start with prevail conditions and effect preconditions + preconditions = list(a.prevail) + for effect in a.effects: + if effect.affected_var_condition is not None and effect.affected_var_condition != -1: + preconditions.append(effect.affected_var_condition_pair) + + # Add mutex-derived preconditions + for mutex in self.sas_mutexes: + for pair in mutex.facts: + if pair in preconditions: + # For each other pair in the mutex group, add a precondition that it's not true + other_pairs = [p for 
p in mutex.facts if p != pair] + for other_pair in other_pairs: + # Here we add a precondition to ensure the other pairs are not true + preconditions.append(self.negate_condition(other_pair)) + + # Convert preconditions to z3.ExprRef + precondition_expr = z3.And(*[self.varvalpair2condition(p) for p in preconditions]) + + # Convert effects to EffectTypePDDL + effects = [] + for effect in a.effects: + affected_var_str = effect.result_var_val_pair.var.nm + index = effect.result_var_val_pair.val # The index in the SasVar.vals list + pvar = z3.Bool(affected_var_str) + effect_pddl = EffectTypePDDL(pvar=pvar, index=index) + effects.append(effect_pddl) + + # Create the SkillPDDL + action_name = a.nm + skill = SkillPDDL(precondition=precondition_expr, action=action_name, effects=effects) + + return skill + + def negate_condition(self, pair: SasVarValPair) -> z3.ExprRef: + return z3.Not(self.varvalpair2condition(pair)) + + def scope(self): + pass + +def test(): + repo_root = "../../.." + # pth = "../../../gripper-painting.sas" + pth_sas_dir = f"{repo_root}/generated_sas" + os.makedirs(pth_sas_dir, exist_ok=True) + pth_sas_in = f"{pth_sas_dir}/gripper-painting.sas" + cmd_s = f"python {repo_root}/oo_scoping/downward_translate/translate_and_scope.py {repo_root}/examples/gripper-painting-domain/domain.pddl {repo_root}/examples/gripper-painting-domain/prob04.pddl --sas-file {pth_sas_in} --scope True" + os.system(cmd_s) + + parser = SasParser(pth=pth_sas_in) + parser.parse() + + def add_pre_extension(s: str, s_suffix: str) -> str: + s_split = s.split(".") + s_split = s_split[:-1] + [s_suffix] + s_split[-1:] + return ".".join(s_split) + + s_sas_out = parser.generate_sas() + with open(add_pre_extension(pth_sas_in, "regen"), "w") as f: + f.write(s_sas_out) + + print(s_sas_out == parser.s_sas) From 8ff966bf33e65d35debdff61450d7adc6418519d Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Tue, 16 Jan 2024 22:07:54 -0800 Subject: [PATCH 05/15] Rename scoping_sas_parser.py -> ..._converter.py --- .../{scoping_sas_parser.py => scoping_sas_converter.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename oo_scoping/downward_translate/{scoping_sas_parser.py => scoping_sas_converter.py} (100%) diff --git a/oo_scoping/downward_translate/scoping_sas_parser.py b/oo_scoping/downward_translate/scoping_sas_converter.py similarity index 100% rename from oo_scoping/downward_translate/scoping_sas_parser.py rename to oo_scoping/downward_translate/scoping_sas_converter.py From b26bde5887498bd1a9d673067b1f365208365bc5 Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Wed, 17 Jan 2024 00:23:02 -0800 Subject: [PATCH 06/15] Convert parsed SAS to FD's SASTask representation --- .../downward_translate/translate_and_scope.py | 30 ++-- oo_scoping/sas_parser.py | 144 ++++++++---------- 2 files changed, 85 insertions(+), 89 deletions(-) diff --git a/oo_scoping/downward_translate/translate_and_scope.py b/oo_scoping/downward_translate/translate_and_scope.py index 89a9453..f7ab917 100755 --- a/oo_scoping/downward_translate/translate_and_scope.py +++ b/oo_scoping/downward_translate/translate_and_scope.py @@ -30,12 +30,14 @@ def python_version_supported(): from oo_scoping.downward_translate import pddl from oo_scoping.downward_translate import pddl_parser from oo_scoping.downward_translate import sas_tasks +from oo_scoping import sas_parser + from oo_scoping.downward_translate import simplify from oo_scoping.downward_translate import timers from oo_scoping.downward_translate import tools from oo_scoping.downward_translate import 
variable_order -from oo_scoping.downward_translate import scoping_sas_parser +from oo_scoping.downward_translate import scoping_sas_converter from oo_scoping.scoping import scope from oo_scoping.writeback_sas import writeback_scoped_sas from oo_scoping.utils import get_atoms @@ -782,41 +784,45 @@ def main(): with open(options.sas_file, "w") as output_file: sas_task.output(output_file) - scope_sas(sas_task) + scope_sas(sas_task=None) def scope_sas(sas_task=None): if sas_task is None: - with timers.timing("Writing output SAS file"): - with open(options.sas_file, "r") as input_file: - sas_task : .output(input_file) + with timers.timing("Reading output SAS file"): + parser = sas_parser.SasParser(pth=options.sas_file) + parser.parse() + sas_task : sas_tasks.SASTask = parser.to_fd() timer = timers.Timer() if options.scope: # This below block of code performs task scoping on the SAS+ domain. - str2var_dict = scoping_sas_parser.make_str2var_dict(sas_task.variables) - str_grounded_action_list = scoping_sas_parser.make_str_grounded_actions( + str2var_dict = scoping_sas_converter.make_str2var_dict(sas_task.variables) + str_grounded_action_list = scoping_sas_converter.make_str_grounded_actions( sas_task.operators ) - cae_triples = scoping_sas_parser.str_grounded_actions2skills( + cae_triples = scoping_sas_converter.str_grounded_actions2skills( str_grounded_action_list, str2var_dict ) - init_cond_list = scoping_sas_parser.make_init_cond_list( + init_cond_list = scoping_sas_converter.make_init_cond_list( sas_task.init.values, str2var_dict ) - goal_cond = scoping_sas_parser.make_goal_cond(sas_task.goal.pairs, str2var_dict) + goal_cond = scoping_sas_converter.make_goal_cond(sas_task.goal.pairs, str2var_dict) rel_pvars, cl_pvars, rel_skills = scope( goals=goal_cond, skills=cae_triples, start_condition=init_cond_list ) sas_file_scoped = get_scoped_file_path(options.sas_file) + def strip_parens(s): + return s.replace('(', '').replace(')', '') + # Make a set for rel pvars and rel actions so that we can lookup amongst these quickly during writeback rel_skill_names = set() for rel_skill in rel_skills: if type(rel_skill.action) == str: - rel_skill_names.add(rel_skill.action[1:-1]) + rel_skill_names.add(strip_parens(rel_skill.action)) elif type(rel_skill.action) == list: for skill_name in rel_skill.action: - rel_skill_names.add(skill_name[1:-1]) + rel_skill_names.add(strip_parens(skill_name)) rel_pvars_names = set() for pvar in rel_pvars: rel_pvars_names.add(str(pvar)[:-2]) diff --git a/oo_scoping/sas_parser.py b/oo_scoping/sas_parser.py index bd67f16..c4440bf 100644 --- a/oo_scoping/sas_parser.py +++ b/oo_scoping/sas_parser.py @@ -5,6 +5,7 @@ import itertools import z3 +from oo_scoping.downward_translate import sas_tasks as fd from oo_scoping.skill_classes import SkillPDDL, EffectTypePDDL @@ -25,6 +26,11 @@ def from_regex_tuple(m: Tuple[str, str, str, str]) -> SasVar: def split_values(s: str) -> Tuple[SasVarVal, ...]: return tuple([SasVarVal(x) for x in s.split("\n")]) + def lookup(self, value: SasVarVal) -> int: + if value is None: + return -1 + return self.vals.index(value) + def get_var_val_pair(self, i: int) -> SasVarValPair: return SasVarValPair(self, i) @@ -48,21 +54,22 @@ class SasEffect: and the var in result must be the same It is maybe wasteful to keep it separate """ - condition: Tuple[SasVarValPair,...] + cond: Tuple[SasVarValPair,...] 
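+    # var: index of affected_var within sas_vars (the FD variable number).
+    # pre/post: the affected variable's value before/after the effect; pre is
+    # None when the effect is unconditional on it (written as -1 in SAS files,
+    # see SasVar.lookup below).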
+ var: int affected_var: SasVar - affected_var_condition: Optional[int] - result_val: int + pre: Optional[int] + post: int @property def affected_var_condition_pair(self) -> Optional[SasVarValPair]: - if self.affected_var_condition is None: + if self.pre is None: return None else: - return SasVarValPair(self.affected_var, self.affected_var_condition) + return SasVarValPair(self.affected_var, self.pre) @property def result_var_val_pair(self) -> SasVarValPair: - return SasVarValPair(self.affected_var, self.result_val) + return SasVarValPair(self.affected_var, self.post) @property def full_condition(self) -> Tuple[SasVarValPair,...]: @@ -71,16 +78,16 @@ def full_condition(self) -> Tuple[SasVarValPair,...]: and affected var condition MF: Should we sort by var? Nah. """ - if self.affected_var_condition is None: - return self.condition + if self.pre is None: + return self.cond else: - return self.condition + [self.affected_var_condition_pair] + return self.cond + [self.affected_var_condition_pair] @dataclass(frozen=True, order=True) class SasOperator: nm: str - prevail: Tuple[SasVarValPair,...] + prevail: Tuple[Tuple,...] effects: Tuple[SasEffect,...] cost: int = 1 #Default to 1, in case we don't use action cost @@ -304,10 +311,11 @@ def parse_effect(self, s: str) -> SasEffect: raise ValueError("We miscounted") return SasEffect( - condition=tuple(conds), + cond=tuple(conds), + var=num_var_affected, affected_var=var_affected, - affected_var_condition=val_cond, - result_val=num_val_result + pre=val_cond, + post=num_val_result ) # Helper functions @@ -330,7 +338,8 @@ def get_var_val_nums_from_str(s: str) -> Tuple[int, int]: def get_sas_var_val_pair_from_str(self, s: str) -> SasVarValPair: var_num, val_num = SasParser.get_var_val_nums_from_str(s) - return self.get_sas_var_val_pair_from_ints(var_num, val_num) + # return self.get_sas_var_val_pair_from_ints(var_num, val_num) + return (var_num, val_num) def var_val_pair2ints(self, p: SasVarValPair) -> Tuple[int, int]: @@ -436,18 +445,18 @@ def generate_operator_str(self, o: SasOperator) -> str: def effect2sas_str(self, e: SasEffect) -> str: - pieces: List[str] = [str(len(e.condition))] + pieces: List[str] = [str(len(e.cond))] # Effect conditions - pieces.extend([self.var_val_pair2sas_str(p) for p in e.condition]) + pieces.extend([self.var_val_pair2sas_str(p) for p in e.cond]) # Affected var pieces.append(str(self.sas_vars.index(e.affected_var))) # Affected var condition - if e.affected_var_condition is None: + if e.pre is None: pieces.append("-1") else: - pieces.append(str(e.affected_var.vals.index(e.affected_var_condition))) + pieces.append(str(e.affected_var.vals.index(e.pre))) # Result value - pieces.append(str(e.result_val)) + pieces.append(str(e.post)) return " ".join(pieces) ## Axioms @@ -457,14 +466,14 @@ def generate_axioms_section(self) -> str: return "\n".join(pieces) def axiom2sas_str(self, a: SasAxiom) -> str: - pieces: List[str] = ["begin_rule", str(len(a.condition))] + pieces: List[str] = ["begin_rule", str(len(a.cond))] # Conditions - pieces.extend([self.var_val_pair2sas_str(p) for p in a.condition]) + pieces.extend([self.var_val_pair2sas_str(p) for p in a.cond]) # Affected var i_var = self.sas_vars.index(a.affected_var) # i_val_old = a.affected_var.vals.index(a.affected_var_condition) # i_val_new = a.affected_var.vals.index(a.result_val) - pieces.append(f"{i_var} {a.affected_var_condition} {a.result_val}") + pieces.append(f"{i_var} {a.pre} {a.post}") pieces.append("end_rule") return "\n".join(pieces) @@ -472,62 +481,43 @@ def 
axiom2sas_str(self, a: SasAxiom) -> str: def check_parse(self) -> bool: return self.generate_sas() == self.s_sas - # Converting to scopeable representation - def z3var(self, v: SasVar) -> z3.Int: - """ - Get the z3 var for a SasVar - """ - if v in self._z3vars.keys(): - self._z3vars[v.nm] = z3.Int(v.nm) - return self._z3vars[v.nm] - - def effect_type(self, p: SasVarValPair) -> EffectTypePDDL: - return EffectTypePDDL(pvar=self.z3var(p.var), index=p.val) - - def varvalpair2condition(self, p: SasVarValPair) -> z3.BoolRef: - return self.z3var(p.var) == p.val - - def sasoperator2skill(self, a: SasOperator) -> SkillPDDL: - # Start with prevail conditions and effect preconditions - preconditions = list(a.prevail) - for effect in a.effects: - if effect.affected_var_condition is not None and effect.affected_var_condition != -1: - preconditions.append(effect.affected_var_condition_pair) - - # Add mutex-derived preconditions - for mutex in self.sas_mutexes: - for pair in mutex.facts: - if pair in preconditions: - # For each other pair in the mutex group, add a precondition that it's not true - other_pairs = [p for p in mutex.facts if p != pair] - for other_pair in other_pairs: - # Here we add a precondition to ensure the other pairs are not true - preconditions.append(self.negate_condition(other_pair)) - - # Convert preconditions to z3.ExprRef - precondition_expr = z3.And(*[self.varvalpair2condition(p) for p in preconditions]) - - # Convert effects to EffectTypePDDL - effects = [] - for effect in a.effects: - affected_var_str = effect.result_var_val_pair.var.nm - index = effect.result_var_val_pair.val # The index in the SasVar.vals list - pvar = z3.Bool(affected_var_str) - effect_pddl = EffectTypePDDL(pvar=pvar, index=index) - effects.append(effect_pddl) - - # Create the SkillPDDL - action_name = a.nm - skill = SkillPDDL(precondition=precondition_expr, action=action_name, effects=effects) - - return skill - - def negate_condition(self, pair: SasVarValPair) -> z3.ExprRef: - return z3.Not(self.varvalpair2condition(pair)) - - def scope(self): - pass + def to_fd(self) -> fd.SASTask: + # Variables + ranges = [v.range for v in self.sas_vars] + axiom_layers = [v.axiom_layer for v in self.sas_vars] + value_names = [v.nm for v in self.sas_vars] + variables = fd.SASVariables(ranges, axiom_layers, value_names) + + # Mutexes + mutexes = [fd.SASMutexGroup(m.facts) for m in self.mutexes] + + # Init + init = fd.SASInit([p.val for p in self.initial_state.var_value_pairs]) + + # Goal + goal = fd.SASGoal(self.goal.var_value_pairs) + + # Operators + operators = [] + for op in self.operators: + pre_post = [] + for e in op.effects: + pre_as_int = self.sas_vars[e.var].lookup(e.pre) + pre_post.append( (e.var, pre_as_int, e.post, e.cond) ) + operators.append(fd.SASOperator(op.nm, op.prevail, pre_post, op.cost)) + + # Axioms + if self.axioms: + raise NotImplementedError("Axioms not implemented") + axioms = [] + + # Metric + metric = self.metric + + sas_task = fd.SASTask(variables, mutexes, init, goal, operators, axioms, metric) + return sas_task + def test(): repo_root = "../../.." 
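A minimal usage sketch of the conversion added above (the file paths here are illustrative; per to_fd, domains with axioms currently raise NotImplementedError):

    from oo_scoping.sas_parser import SasParser

    parser = SasParser(pth="output.sas")   # any SAS file emitted by the translator
    parser.parse()                         # fill in vars, mutexes, init, goal, operators
    sas_task = parser.to_fd()              # convert to FD's native SASTask
    with open("output_regen.sas", "w") as f:
        sas_task.output(f)                 # write back via FD's own serializer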
From 82a9a4578b5bcc2984db8f459f85afec6a4663ac Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Wed, 17 Jan 2024 01:23:22 -0800 Subject: [PATCH 07/15] Add sas_experiment.py --- experiments/sas_experiment.py | 277 ++++++++++++++++++ oo_scoping/downward_translate/options.py | 5 + .../downward_translate/translate_and_scope.py | 50 ++-- 3 files changed, 307 insertions(+), 25 deletions(-) create mode 100644 experiments/sas_experiment.py diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py new file mode 100644 index 0000000..806839f --- /dev/null +++ b/experiments/sas_experiment.py @@ -0,0 +1,277 @@ +import os, time, argparse, subprocess, json, shutil, glob, re, enum +from typing import Iterable + + +import pandas as pd +import numpy as np + +# Example command: +# python experiments/fd_experiment.py 3 examples/IPC_domains_propositional/driverlog/domain.pddl examples/IPC_domains_propositional/driverlog/prob15.pddl ~/Documents/GitHub/downward/fast-downward.py ./logs --problems_dir randomly_generated_prob_files/driverlog/ +""" +TODO: +Get state-visited counts +Use https://gist.github.com/nawatts/e2cdca610463200c12eac2a14efc0bfb to print output +""" +repo_root = os.path.dirname(os.path.dirname(__file__)) + +# Helper functions +def get_scoped_file_path(unscoped_file): + return add_path_suffix(unscoped_file, "_scoped") + +def add_path_suffix(p, s): + basename, ext = os.path.splitext(p) + return basename + s + ext + + +# Main function +def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_clear=False, run_id=None): + log_dir = log_dir + "/fd/" + plan_type + "/" + sas_file.split('.')[-2] + start_time_exp = time.time() + + if run_id is None or run_id == -1: + run_id_start = 0 + else: + run_id_start = run_id + + if run_id != -1: + # Clear log dir if force_clear is True + # if force_clear and os.path.exists(log_dir): + # shutil.rmtree(log_dir) + # Make the log directory. Throws an error if the directory already exists + os.makedirs(log_dir, exist_ok=True) + + # Save arguments to log_dir + args_dict = { + "n_runs":n_runs, + "problem":sas_file, + "fd_path":fd_path, + "log_dir":log_dir + } + with open(f"{log_dir}/args.json", "w") as f: + json.dump({k: str(v) for k, v in args_dict.items()}, f) + + timings_dict = { + "scoping":[], + "plan_scoped_time":[], + "total_scoped_time":[], + "total_unscoped_time":[], + "plan_unscoped_generated_nodes": [], + "plan_unscoped_node_expansions": [], + "plan_scoped_generated_nodes": [], + "plan_scoped_node_expansions": [], + "encoding_size": [], + "scoping_exit_code": [], + "plan_scoped_exit_code": [], + "plan_unscoped_exit_code": [], + } + + # This would be more precise if we recorded time for multiple iterations of each portion, then divided. TODO consider doing this. 
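+    # (e.g., wrap each just_scope/plan call in a k-iteration loop timed with
+    # time.perf_counter and divide the elapsed total by k)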
+ for i_run in range(run_id_start, run_id_start + n_runs): + print(f"Run {i_run}") + log_dir_this_run = f"{log_dir}/{i_run}" + timings_path = f"{log_dir_this_run}/times.json" + + if run_id != -1: + if force_clear and os.path.exists(log_dir_this_run): + shutil.rmtree(log_dir_this_run) + os.makedirs(log_dir_this_run, exist_ok=False) + + # Scoping + print("Scoping") + scope_cmd_output = just_scope(sas_path=sas_file) + scope_start = time.time() + scope_cmd_output = just_scope(sas_path=sas_file) + scope_end = time.time() + scoping_time = scope_end - scope_start + timings_dict["scoping"].append(scoping_time) + timings_dict["scoping_exit_code"].append(scope_cmd_output.returncode) + with open(timings_path, "w") as f: + json.dump(timings_dict, f) + save_cmd_output(scope_cmd_output, f"{log_dir_this_run}/translate_and_scope") + if scope_cmd_output.returncode != 0: + raise ValueError(f"Scoping failed with returncode {scope_cmd_output.returncode}\nstderr: {scope_cmd_output.stderr}\nstdout: {scope_cmd_output.stdout}") + + sas_scoped_path = sas_file[:-4] + "_scoped" + sas_file[-4:] + + # Planning on scoped + print("Planning on scoped") + plan_scoped_cmd_output = plan(sas_scoped_path, fd_path, plan_type=plan_type) + plan_scoped_start_time = time.time() + plan_scoped_cmd_output = plan(sas_scoped_path, fd_path, plan_type=plan_type) + plan_scoped_end_time = time.time() + plan_scoped_time = plan_scoped_end_time - plan_scoped_start_time + timings_dict["plan_scoped_time"].append(plan_scoped_time) + timings_dict["total_scoped_time"].append(scoping_time + plan_scoped_time) + timings_dict["plan_scoped_exit_code"].append(plan_scoped_cmd_output.returncode) + if plan_scoped_cmd_output.returncode == 0: + timings_dict["plan_scoped_generated_nodes"].append(int(re.search(r"(Generated) \d*", plan_scoped_cmd_output.stdout.decode()).group(0).split(' ')[1])) + timings_dict["plan_scoped_node_expansions"].append(int(re.search(r"(Expanded) \d*", plan_scoped_cmd_output.stdout.decode()).group(0).split(' ')[1])) + with open(timings_path, "w") as f: + json.dump(timings_dict, f) + save_cmd_output(plan_scoped_cmd_output, f"{log_dir_this_run}/plan_scoped") + + # Planning on unscoped + print("Planning on unscoped") + plan_unscoped_cmd_output = plan(sas_file, fd_path, plan_type=plan_type) + plan_unscoped_start_time = time.time() + plan_unscoped_cmd_output = plan(sas_file, fd_path, plan_type=plan_type) + # plan_unscoped_cmd_output = plan(sas_2_path, fd_path, plan_type=plan_type) + plan_unscoped_end_time = time.time() + plan_unscoped_time = plan_unscoped_end_time - plan_unscoped_start_time + timings_dict["total_unscoped_time"].append(plan_unscoped_time) + timings_dict["plan_unscoped_exit_code"].append(plan_unscoped_cmd_output.returncode) + if plan_unscoped_cmd_output.returncode == 0: + timings_dict["plan_unscoped_generated_nodes"].append(int(re.search(r"(Generated) \d*", plan_unscoped_cmd_output.stdout.decode()).group(0).split(' ')[1])) + timings_dict["plan_unscoped_node_expansions"].append(int(re.search(r"(Expanded) \d*", plan_unscoped_cmd_output.stdout.decode()).group(0).split(' ')[1])) + with open(timings_path, "w") as f: + json.dump(timings_dict, f) + save_cmd_output(plan_unscoped_cmd_output, f"{log_dir_this_run}/plan_unscoped") + else: + print("Loading results") + with open(timings_path, "r") as f: + loaded_timings = json.load(f) + for key in loaded_timings.keys(): + value = np.nan if loaded_timings[key] == [] else loaded_timings[key][0] + timings_dict[key].append(value) + + end_time_exp = time.time() + experiment_duration = 
end_time_exp - start_time_exp + print(f"Finished experiment") + print(f"Ran {n_runs} trials for a total duration of {experiment_duration}") + print(timings_dict) + df_times = pd.DataFrame(data=timings_dict) + s_times_avg = df_times.mean() + s_times_avg.name = 'avg' + s_times_std = df_times.std() + s_times_std.name = 'std' + s_times_cv = s_times_std / s_times_avg + s_times_cv.name = "cv" + df_time_summary = pd.concat([s_times_avg, s_times_std, s_times_cv], axis=1) + print(f"Timing summary:") + print(df_time_summary) + if n_runs > 1: + df_time_summary.to_csv(f"{log_dir}/timing_summary.csv", index=True) + return timings_dict + + +def run_experiments_on_folder(n_runs, sas_dir, fd_path, log_dir,plan_type: str, force_clear=False): + total_timings_dict = {} + num_solved_problems = 0 + problem_files = glob.glob(sas_dir + "/*.sas") + + for sas_problem in problem_files: + log_dir_this_problem = log_dir + "/" + sas_problem.split(".")[-2] + + try: + curr_timings_dict = run_experiment(n_runs, sas_problem, fd_path, log_dir_this_problem, plan_type=plan_type, force_clear=force_clear) + except ValueError: + # In this case, the randomly-generated problem was impossible to solve. + # Simply skip and move on. + print(f"Problem {sas_problem} is impossible to solve.") + continue + + num_solved_problems += 1 + if len(total_timings_dict) == 0: + total_timings_dict = curr_timings_dict + else: + for key in total_timings_dict.keys(): + total_timings_dict[key] += curr_timings_dict[key] + + # Convert timings dict to dataframe for easy processing (code mostly + # copied from above method). + df_times = pd.DataFrame(data=total_timings_dict) + s_times_avg = df_times.mean() + s_times_avg.name = 'avg' + s_times_std = df_times.std() + s_times_std.name = 'std' + s_times_cv = s_times_std / s_times_avg + s_times_cv.name = "cv" + df_time_summary = pd.concat([s_times_avg, s_times_std, s_times_cv], axis=1) + + print(f"Finished experiments; {num_solved_problems} problems out of {len(problem_files)} were solvable.") + print(f"Aggregate Timing summary:") + print(df_time_summary) + + log_dir_this_domain = '/'.join(log_dir_this_problem.split('/')[:-1]) + os.makedirs(log_dir_this_domain, exist_ok=True) + df_time_summary.to_csv(f"{log_dir_this_domain}/timing_summary.csv", index=True) + + +def save_cmd_output(cmd_output, save_dir): + os.makedirs(save_dir, exist_ok=False) + outpaths = { + "args":f"{save_dir}/args.txt", + "stdout":f"{save_dir}/stdout.txt", + "stderr":f"{save_dir}/stderr.txt", + "returncode":f"{save_dir}/returncode.txt" + } + + with open(outpaths["args"], "w") as f: + # TODO would be better to store as a csv perhaps + f.write(str(cmd_output.args)) + + with open(outpaths["stdout"], "w") as f: + f.write(cmd_output.stdout.decode('UTF-8')) + + with open(outpaths["stderr"], "w") as f: + f.write(cmd_output.stderr.decode('UTF-8')) + + with open(outpaths["returncode"], "w") as f: + f.write(str(cmd_output.returncode)) + + return outpaths + + +def translate(domain, problem, sas_path): + cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", domain, problem, "--sas-file", sas_path] + print(' '.join(cmd_pieces)) + print() + cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False) + return cmd_output + + +def translate_and_scope(domain, problem, unscoped_sas_path): + cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", domain, problem, "--sas-file", unscoped_sas_path, "--scope", "True"] + cmd_output = subprocess.run(cmd_pieces, 
capture_output=True, shell=False) + return cmd_output + +def just_scope(sas_path): + cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", "--sas-file", sas_path, "--scope", "True", "--scope-only", "True"] + print(" ".join(cmd_pieces)) + cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False) + return cmd_output + + +SEARCH_CONFIGS = { + "lmcut":"astar(lmcut())", + "ms":"astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[goal_relevance,dfp,total_order])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50k,threshold_before_merge=1))", + "h2":"astar(hm(m=2, verbosity=normal, transform=no_transform(), cache_estimates=true))" + # h2 is incredibly slow in FD. Don't use it. +} + +def plan(sas_path, fd_path, plan_type: str = "lmcut"): + search_config = SEARCH_CONFIGS[plan_type] + # Note: we don't call "python" at the beginning + # Note: "--" separates the file path arg from the remaining args + cmd_pieces = [fd_path, sas_path, "--", "--search", search_config] + cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False) + return cmd_output + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("n_runs" ,type=int) + parser.add_argument("sas_file", type=str) + parser.add_argument("fd_path", type=str) + parser.add_argument("log_dir", type=str) + parser.add_argument("--plan_type", type=str, default="lmcut", choices=list(SEARCH_CONFIGS.keys()), help="Plan techniques to use.") + parser.add_argument("--force_clear_log_dir", default=False, action='store_true') + parser.add_argument("--problems_dir", type=str, required=False, default=None) + parser.add_argument("--run_id", type=int, default=None) + + args = parser.parse_args() + + if args.problems_dir is None: + run_experiment(args.n_runs, args.sas_file, args.fd_path, args.log_dir, plan_type=args.plan_type, force_clear=args.force_clear_log_dir, run_id=args.run_id) + else: + run_experiments_on_folder(args.n_runs, args.problems_dir, args.fd_path, args.log_dir, plan_type=args.plan_type, force_clear=args.force_clear_log_dir) diff --git a/oo_scoping/downward_translate/options.py b/oo_scoping/downward_translate/options.py index 70b8165..0114229 100644 --- a/oo_scoping/downward_translate/options.py +++ b/oo_scoping/downward_translate/options.py @@ -51,6 +51,11 @@ def parse_args(): default=False, help="Whether or not to run scoping (default: %(default)s)", ) + argparser.add_argument( + "--scope-only", + default=False, + help="Whether to only scope (default: %(default)s)", + ) argparser.add_argument( "--invariant-generation-max-time", default=300, diff --git a/oo_scoping/downward_translate/translate_and_scope.py b/oo_scoping/downward_translate/translate_and_scope.py index f7ab917..a7b806b 100755 --- a/oo_scoping/downward_translate/translate_and_scope.py +++ b/oo_scoping/downward_translate/translate_and_scope.py @@ -760,38 +760,38 @@ def dump_statistics(sas_task): def main(): - timer = timers.Timer() - with timers.timing("Parsing", True): - task = pddl_parser.open( - domain_filename=options.domain, task_filename=options.task - ) + if not options.scope_only: + timer = timers.Timer() + with timers.timing("Parsing", True): + task = pddl_parser.open( + domain_filename=options.domain, task_filename=options.task + ) - with timers.timing("Normalizing task"): - normalize.normalize(task) + with 
timers.timing("Normalizing task"): + normalize.normalize(task) - if options.generate_relaxed_task: - # Remove delete effects. - for action in task.actions: - for index, effect in reversed(list(enumerate(action.effects))): - if effect.literal.negated: - del action.effects[index] + if options.generate_relaxed_task: + # Remove delete effects. + for action in task.actions: + for index, effect in reversed(list(enumerate(action.effects))): + if effect.literal.negated: + del action.effects[index] - sas_task = pddl_to_sas(task) + sas_task = pddl_to_sas(task) - dump_statistics(sas_task) + dump_statistics(sas_task) - with timers.timing("Writing output SAS file"): - with open(options.sas_file, "w") as output_file: - sas_task.output(output_file) + with timers.timing("Writing output SAS file"): + with open(options.sas_file, "w") as output_file: + sas_task.output(output_file) - scope_sas(sas_task=None) + scope_sas(sas_path=options.sas_file) -def scope_sas(sas_task=None): - if sas_task is None: - with timers.timing("Reading output SAS file"): - parser = sas_parser.SasParser(pth=options.sas_file) - parser.parse() - sas_task : sas_tasks.SASTask = parser.to_fd() +def scope_sas(sas_path): + with timers.timing("Reading SAS file"): + parser = sas_parser.SasParser(pth=sas_path) + parser.parse() + sas_task : sas_tasks.SASTask = parser.to_fd() timer = timers.Timer() if options.scope: From e527eae119f541d247f39c7db44ae250c57d8985 Mon Sep 17 00:00:00 2001 From: Cam Allen Date: Wed, 17 Jan 2024 01:48:55 -0800 Subject: [PATCH 08/15] Add named args for onager compatibility --- experiments/sas_experiment.py | 42 +++++++++++++++++------------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py index 806839f..2f1bf4a 100644 --- a/experiments/sas_experiment.py +++ b/experiments/sas_experiment.py @@ -93,23 +93,6 @@ def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_cle sas_scoped_path = sas_file[:-4] + "_scoped" + sas_file[-4:] - # Planning on scoped - print("Planning on scoped") - plan_scoped_cmd_output = plan(sas_scoped_path, fd_path, plan_type=plan_type) - plan_scoped_start_time = time.time() - plan_scoped_cmd_output = plan(sas_scoped_path, fd_path, plan_type=plan_type) - plan_scoped_end_time = time.time() - plan_scoped_time = plan_scoped_end_time - plan_scoped_start_time - timings_dict["plan_scoped_time"].append(plan_scoped_time) - timings_dict["total_scoped_time"].append(scoping_time + plan_scoped_time) - timings_dict["plan_scoped_exit_code"].append(plan_scoped_cmd_output.returncode) - if plan_scoped_cmd_output.returncode == 0: - timings_dict["plan_scoped_generated_nodes"].append(int(re.search(r"(Generated) \d*", plan_scoped_cmd_output.stdout.decode()).group(0).split(' ')[1])) - timings_dict["plan_scoped_node_expansions"].append(int(re.search(r"(Expanded) \d*", plan_scoped_cmd_output.stdout.decode()).group(0).split(' ')[1])) - with open(timings_path, "w") as f: - json.dump(timings_dict, f) - save_cmd_output(plan_scoped_cmd_output, f"{log_dir_this_run}/plan_scoped") - # Planning on unscoped print("Planning on unscoped") plan_unscoped_cmd_output = plan(sas_file, fd_path, plan_type=plan_type) @@ -126,6 +109,23 @@ def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_cle with open(timings_path, "w") as f: json.dump(timings_dict, f) save_cmd_output(plan_unscoped_cmd_output, f"{log_dir_this_run}/plan_unscoped") + + # Planning on scoped + print("Planning on scoped") + plan_scoped_cmd_output = 
From 9fedf2c9b1d398db659b53ce214fd14d5c70f724 Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 01:55:11 -0800
Subject: [PATCH 09/15] Fix fdr-generator path

---
 experiments/sas_experiment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index 2f1bf4a..93b3a55 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -236,7 +236,7 @@ def translate_and_scope(domain, problem, unscoped_sas_path):
     return cmd_output
 
 def just_scope(sas_path):
-    cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", "--sas-file", sas_path, "--scope", "True", "--scope-only", "True"]
+    cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", "--sas-file", 'fdr-generator/benchmarks/'+sas_path, "--scope", "True", "--scope-only", "True"]
     print(" ".join(cmd_pieces))
     cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False)
     return cmd_output

From 92ac8991ff720ced81e84fa0f6227f05470f34da Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 02:01:06 -0800
Subject: [PATCH 10/15] Add 10-min planning timeout

---
 experiments/sas_experiment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index 93b3a55..f485a8f 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -254,7 +254,7 @@ def plan(sas_path, fd_path, plan_type: str = "lmcut"):
     # Note: we don't call "python" at the beginning
     # Note: "--" separates the file path arg from the remaining args
     cmd_pieces = [fd_path, sas_path, "--", "--search", search_config]
-    cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False)
+    cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False, timeout=600)
     return cmd_output
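
One behavioral note on the 10-minute limit: subprocess.run enforces timeout= by raising subprocess.TimeoutExpired rather than returning a CompletedProcess with a nonzero exit code, so a planner run that hits the limit propagates an exception out of plan(). A sketch of a wrapper that records the timeout instead of aborting the whole sweep (the names here are illustrative, not part of the patch):

    import subprocess

    PLAN_TIMEOUT_S = 600

    def run_with_timeout(cmd_pieces):
        try:
            return subprocess.run(cmd_pieces, capture_output=True,
                                  shell=False, timeout=PLAN_TIMEOUT_S)
        except subprocess.TimeoutExpired as exc:
            # exc.stdout and exc.stderr hold whatever the planner printed
            # before it was killed, so callers can log them like a failure.
            return exc
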
From 4081e1b80d7b1913afdc31988d60cde8e74812ff Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 02:04:36 -0800
Subject: [PATCH 11/15] Remove extra fn calls

---
 experiments/sas_experiment.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index f485a8f..929e593 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -78,7 +78,6 @@ def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_cle
 
         # Scoping
         print("Scoping")
-        scope_cmd_output = just_scope(sas_path=sas_file)
         scope_start = time.time()
         scope_cmd_output = just_scope(sas_path=sas_file)
         scope_end = time.time()
@@ -95,7 +94,6 @@ def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_cle
 
         # Planning on unscoped
         print("Planning on unscoped")
-        plan_unscoped_cmd_output = plan(sas_file, fd_path, plan_type=plan_type)
         plan_unscoped_start_time = time.time()
         plan_unscoped_cmd_output = plan(sas_file, fd_path, plan_type=plan_type)
         # plan_unscoped_cmd_output = plan(sas_2_path, fd_path, plan_type=plan_type)
@@ -112,7 +110,6 @@ def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_cle
 
         # Planning on scoped
         print("Planning on scoped")
-        plan_scoped_cmd_output = plan(sas_scoped_path, fd_path, plan_type=plan_type)
         plan_scoped_start_time = time.time()
         plan_scoped_cmd_output = plan(sas_scoped_path, fd_path, plan_type=plan_type)
         plan_scoped_end_time = time.time()
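
With the warm-up calls gone, each phase is timed with the same start/stop time.time() pattern, repeated for scoping, unscoped planning, and scoped planning. A small helper could centralize that bookkeeping; this is a sketch (timed is an illustrative name, not in the patch), and it uses time.perf_counter(), which is monotonic and therefore better suited to measuring durations than time.time():

    import time

    def timed(fn, *args, **kwargs):
        # Return the function's result together with its wall-clock duration.
        start = time.perf_counter()
        result = fn(*args, **kwargs)
        return result, time.perf_counter() - start

    # e.g.: scope_cmd_output, scoping_time = timed(just_scope, sas_path=sas_file)
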
From dfee0a5b2048c96e326bdc3d95dc04519ca2e4bb Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 10:23:16 -0800
Subject: [PATCH 12/15] Fix path issues with . characters

---
 experiments/sas_experiment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index 929e593..f7c1609 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -25,7 +25,7 @@ def add_path_suffix(p, s):
 
 # Main function
 def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_clear=False, run_id=None):
-    log_dir = log_dir + "/fd/" + plan_type + "/" + sas_file.split('.')[-2]
+    log_dir = log_dir + "/fd/" + plan_type + "/" + sas_file.split('/')[0] + "/" + sas_file.split('/')[1]
     start_time_exp = time.time()
 
     if run_id is None or run_id == -1:

From a4a922be4b10f47c8eaf0d55c9afadc4a74d9726 Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 16:39:31 -0800
Subject: [PATCH 13/15] Fix tag parsing for structural experiments

---
 experiments/sas_experiment.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index f7c1609..47c1122 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -25,7 +25,8 @@ def add_path_suffix(p, s):
 
 # Main function
 def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_clear=False, run_id=None):
-    log_dir = log_dir + "/fd/" + plan_type + "/" + sas_file.split('/')[0] + "/" + sas_file.split('/')[1]
+    tag = sas_file.split('/')[0] + "/" + sas_file.split('/')[1] + "/" + sas_file.split('/')[2][:-4]
+    log_dir = log_dir + "/fd/" + plan_type + "/" + tag
     start_time_exp = time.time()
 
     if run_id is None or run_id == -1:

From 82e70fad4159161d8c7e9c76c4da9da984063e30 Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 17:01:47 -0800
Subject: [PATCH 14/15] Fix fdr-generator paths

---
 experiments/sas_experiment.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index 47c1122..d03e927 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -26,6 +26,7 @@ def add_path_suffix(p, s):
 # Main function
 def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_clear=False, run_id=None):
     tag = sas_file.split('/')[0] + "/" + sas_file.split('/')[1] + "/" + sas_file.split('/')[2][:-4]
+    sas_file = 'fdr-generator/benchmarks/' + sas_file
     log_dir = log_dir + "/fd/" + plan_type + "/" + tag
     start_time_exp = time.time()
 
@@ -75,7 +76,7 @@ def run_experiment(n_runs, sas_file, fd_path, log_dir, plan_type: str, force_cle
         if run_id != -1:
             if force_clear and os.path.exists(log_dir_this_run):
                 shutil.rmtree(log_dir_this_run)
-            os.makedirs(log_dir_this_run, exist_ok=False)
+            os.makedirs(log_dir_this_run, exist_ok=True)
 
         # Scoping
         print("Scoping")
@@ -234,7 +235,7 @@ def translate_and_scope(domain, problem, unscoped_sas_path):
     return cmd_output
 
 def just_scope(sas_path):
-    cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", "--sas-file", 'fdr-generator/benchmarks/'+sas_path, "--scope", "True", "--scope-only", "True"]
+    cmd_pieces = ["python", f"{repo_root}/oo_scoping/downward_translate/translate_and_scope.py", "--sas-file", sas_path, "--scope", "True", "--scope-only", "True"]
     print(" ".join(cmd_pieces))
     cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False)
     return cmd_output
@@ -252,6 +253,8 @@ def plan(sas_path, fd_path, plan_type: str = "lmcut"):
     # Note: we don't call "python" at the beginning
     # Note: "--" separates the file path arg from the remaining args
     cmd_pieces = [fd_path, sas_path, "--", "--search", search_config]
+    print(' '.join(cmd_pieces))
+    print()
     cmd_output = subprocess.run(cmd_pieces, capture_output=True, shell=False, timeout=600)
     return cmd_output
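
Patches 12 through 14 converge on benchmark paths of the form fdr-generator/benchmarks/<domain>/<family>/<problem>.sas, with the log tag built by splitting on "/" (robust to "." appearing in directory names) and trimming the 4-character ".sas" suffix by hand. A pathlib sketch of the same resolution, assuming exactly that three-level layout (resolve_paths is an illustrative name, not in the patch):

    from pathlib import Path

    BENCHMARKS = Path("fdr-generator/benchmarks")

    def resolve_paths(log_dir, plan_type, sas_file):
        rel = Path(sas_file)
        tag = rel.parent / rel.stem  # e.g. domain/family/problem
        return BENCHMARKS / rel, Path(log_dir) / "fd" / plan_type / tag

    # e.g.: sas_path, run_log_dir = resolve_paths("logs", "lmcut", "domain/family/p01.sas")
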
From 42d5b5dc7316b23f090bc47085fb10319041546f Mon Sep 17 00:00:00 2001
From: Cam Allen
Date: Wed, 17 Jan 2024 17:03:33 -0800
Subject: [PATCH 15/15] Allow existing dirs

---
 experiments/sas_experiment.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/experiments/sas_experiment.py b/experiments/sas_experiment.py
index d03e927..5e21b54 100644
--- a/experiments/sas_experiment.py
+++ b/experiments/sas_experiment.py
@@ -197,7 +197,7 @@ def run_experiments_on_folder(n_runs, sas_dir, fd_path, log_dir,plan_type: str,
 
 
 def save_cmd_output(cmd_output, save_dir):
-    os.makedirs(save_dir, exist_ok=False)
+    os.makedirs(save_dir, exist_ok=True)
     outpaths = {
         "args":f"{save_dir}/args.txt",
        "stdout":f"{save_dir}/stdout.txt",