From d1859eb3a84567817a7adc580e59b343a1220fec Mon Sep 17 00:00:00 2001
From: Leonardo Correa
Date: Mon, 4 Dec 2023 12:34:37 -0300
Subject: [PATCH] Add dependency graph

---
 examples/cyclic.plp     |   6 ++
 examples/cyclic_neg.plp |   2 +
 examples/cyclic_pos.plp |   2 +
 pasp/app.py             |  28 +++++--
 pasp/grammar.lark       |  45 ++++++------
 pasp/grammar.py         | 158 ++++++++++++++++++++++++++++++----------
 pasp/graph.py           | 146 +++++++++++++++++++++++++++++++++++++
 pasp/program.py         |   6 +-
 8 files changed, 328 insertions(+), 65 deletions(-)
 create mode 100644 examples/cyclic.plp
 create mode 100644 examples/cyclic_neg.plp
 create mode 100644 examples/cyclic_pos.plp
 create mode 100644 pasp/graph.py

diff --git a/examples/cyclic.plp b/examples/cyclic.plp
new file mode 100644
index 0000000..765417b
--- /dev/null
+++ b/examples/cyclic.plp
@@ -0,0 +1,6 @@
+a :- not b.
+b :- not a.
+c :- a, not d.
+d :- a, not c.
+e :- c, not a.
+e :- d, not a.
\ No newline at end of file
diff --git a/examples/cyclic_neg.plp b/examples/cyclic_neg.plp
new file mode 100644
index 0000000..efce479
--- /dev/null
+++ b/examples/cyclic_neg.plp
@@ -0,0 +1,2 @@
+p(1,2).
+q(X) :- p(X,Y), not q(Y).
\ No newline at end of file
diff --git a/examples/cyclic_pos.plp b/examples/cyclic_pos.plp
new file mode 100644
index 0000000..aaf9262
--- /dev/null
+++ b/examples/cyclic_pos.plp
@@ -0,0 +1,2 @@
+p(1,2).
+q(X) :- p(X,Y), q(Y).
\ No newline at end of file
diff --git a/pasp/app.py b/pasp/app.py
index 75b3d17..7b490e3 100644
--- a/pasp/app.py
+++ b/pasp/app.py
@@ -1,5 +1,7 @@
 import pasp
 import sys
+from .program import Program
+from .graph import Graph
 
 ARGUMENTS = ["sem", "psem", "help"]
 ARGUMENTS_SHORTCUTS = ["s", "p", "h"]
@@ -130,16 +132,30 @@ def parse_args() -> dict:
 
 def main():
   print("pasp version", pasp.__version__)
-  A, F = parse_args()
-  if len(F) > 0:
-    P = pasp.parse(*F, semantics = A["sem"])
-    if "psemantics" not in P.directives: P.directives["psemantics"] = {"psemantics": A["psem"]}
-    P()
+  args, files = parse_args()
+  if len(files) > 0:
+    prog = pasp.parse(*files, semantics = args["sem"])
+    if "psemantics" not in prog.directives: prog.directives["psemantics"] = {"psemantics": args["psem"]}
+
+    pos = prog.graph.isPositiveCyclic()
+    neg = prog.graph.isNegativeCyclic()
+    isCyclic = pos or neg
+
+    if isCyclic:
+      print("Program contains cycle(s).")
+    else:
+      print("Program doesn't contain a cycle.")
+
+    prog.run()
+
+    print(prog)
   else:
     print("Reading from stdin")
    inp = ""
     for l in sys.stdin: inp += l
-    pasp.exact(pasp.parse(inp, from_str = True, semantics = A["sem"]), psemantics = A["psem"])
+    pasp.exact(pasp.parse(inp, from_str = True, semantics = args["sem"]), psemantics = args["psem"])
   return 0
 
 if __name__ == "__main__": main()
diff --git a/pasp/grammar.lark b/pasp/grammar.lark
index 0e08061..9e9a9ab 100644
--- a/pasp/grammar.lark
+++ b/pasp/grammar.lark
@@ -34,8 +34,8 @@ CMP_OP: EQQ
       | LEQ
       | GEQ
 
-frac: REAL "/" REAL
-prob: frac | REAL
+frac: FLOAT "/" FLOAT
+prob: frac | FLOAT
 
 // Constants.
 WORD: /[a-z]\w*/
@@ -65,18 +65,18 @@ LEARN: "?"
 CONST: "!"
 
 // Set.
-set: "{" (((ID | WORD) ("," (ID | WORD))+) | interval) "}"
+set: "{" (((INT | WORD) ("," (INT | WORD))+) | interval) "}"
 // Atom.
 _atom: WORD
 // Interval.
-interval: (ID | WORD) ".." (ID | WORD)
+interval: (INT | WORD) ".." (INT | WORD)
 
 // Predicate.
_ground: (_atom | grpred)
_nground: (_atom | pred)
-grpred: WORD "(" (_ground | ID | interval) ("," (_ground | ID | interval))* ")"
-query_pred: WORD "(" (VAR | _nground | ID | interval) ((";" | ",") (VAR | _nground | ID | interval))* ")"
-pred: WORD "(" (_nground | ID | interval | VAR) ("," (_nground | ID | interval | VAR))* ")"
+grpred: WORD "(" (_ground | INT | interval) ("," (_ground | INT | interval))* ")"
+query_pred: WORD "(" (VAR | _nground | INT | interval) ((";" | ",") (VAR | _nground | INT | interval))* ")"
+pred: WORD "(" (_nground | INT | interval | VAR) ("," (_nground | INT | interval | VAR))* ")"
 // Literal.
 lit: NEG* _nground
 grlit: NEG* _ground
@@ -117,20 +117,20 @@ adr: (ad_head{_nground} | lad_head{_nground}) ":-" body "."
 _ad: ad | adr
 
 // Python function call with arguments.
-py_func_args: WORD | REAL | ID
-py_func_kwargs: WORD "=" (WORD | REAL | ID)
+py_func_args: WORD | FLOAT | INT
+py_func_kwargs: WORD "=" (WORD | FLOAT | INT)
 py_func_call: PY_FUNC ("(" (py_func_args | py_func_kwargs) ("," (py_func_args | py_func_kwargs))* ")")?
 
 // Data special rule.
 test: "test" "(" (path | ("@" py_func_call)) ")"
 train: "train" "(" (path | ("@" py_func_call)) ")"
-data: WORD "(" (WORD | ID) ")" "~" test ("," train)? "."
+data: WORD "(" (WORD | INT) ")" "~" test ("," train)? "."
 
 // Torch block.
 python: "#python" /(?:.|\n)+?(?=\#end\.)/ "#end."
 // PyTorch hub.
 hub: "@" PY_FUNC ("on" (("\"" LOCAL_NET "\"" ("at" "\"local\"")?) | ("\"" GITHUB "\"" "at" "\"github\"")))?
 // Optimizer parameters.
-_param: WORD "=" (REAL | BOOL | NULL | ("\"" /\w+/ "\""))
+_param: WORD "=" (FLOAT | BOOL | NULL | ("\"" /\w+/ "\""))
 // Sequence of parameters.
 params: _param ("," _param)*
 // Neural fact.
@@ -139,13 +139,16 @@ nrule: (LEARN | CONST) "::" WORD "(" VAR (";" set)? ")" "as" hub ("with" params)
 nad: (LEARN | CONST) "::" WORD "(" VAR "," set (";" set)? ")" "as" hub ("with" params)? ":-" lit ("," lit)* "."
 _neural: nrule | nad
 
+// Import another file into the current program.
+import_file: "#import" /(.+)\/([^\/]+)/
+
 // Learning options and directive.
-_learn_opt_lr: /lr/ "=" REAL
+_learn_opt_lr: /lr/ "=" FLOAT
 _learn_opt_alg: /alg/ "=" "\"" (/lagrange/ | /neurasp/ | /fixpoint/) "\""
-_learn_opt_batch: /batch/ "=" ID
-_learn_opt_niters: /niters/ "=" ID
-_learn_opt_momentum: /momentum/ "=" REAL
-_learn_opt_smoothing: /smoothing/ "=" REAL
+_learn_opt_batch: /batch/ "=" INT
+_learn_opt_niters: /niters/ "=" INT
+_learn_opt_momentum: /momentum/ "=" FLOAT
+_learn_opt_smoothing: /smoothing/ "=" FLOAT
 _learn_opt_display: /display/ "=" "\"" (/none/ | /progress/ | /loglikelihood/) "\""
 _learn_opt: (_learn_opt_lr | _learn_opt_alg | _learn_opt_batch | _learn_opt_niters | _learn_opt_display | _learn_opt_smoothing | _learn_opt_momentum)
@@ -162,7 +165,7 @@ semantics: "#semantics" (("(" _semantics_exp ")") | (_semantics_exp)) "."
 constraint: ":-" body "."
 
 // Binary operation.
-bop: ((ID | VAR | bop | WORD) OP (ID | VAR | bop | WORD)) | (VAR EQQ interval)
+bop: ((INT | VAR | bop | WORD) OP (INT | VAR | bop | WORD)) | (VAR EQQ interval)
 
 // Query.
 qelement: (NEG | UND)? (WORD | query_pred)
 interp: qelement ("," qelement)*
 _interp_exp: interp ("|" interp)?
 query: "#query" (("(" _interp_exp ")") | ( _interp_exp )) "."?
 
 // Constant definition.
-constdef: "#const" WORD "=" ID "."
+constdef: "#const" WORD "=" INT "."
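+// Example (illustrative, not in the original grammar comments): a program may
+// then write `#const n = 3.` and use `n` wherever the grammar accepts an INT.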
-plp: (constdef | _fact | _rule | _ad | _neural | data | python | constraint | query | learn | semantics | _aggr)*
+plp: (constdef | _fact | _rule | _ad | _neural | data | python | import_file | constraint | query | learn | semantics | _aggr)*
 
 COMMENT: "%" /[^\n]*/ NEWLINE
 
 %import common.WORD -> NAME
-%import common.INT -> ID
-%import common.NUMBER -> REAL
+%import common.INT -> INT
+%import common.NUMBER -> FLOAT
 %import common.NEWLINE -> NEWLINE
 %import common.WS
diff --git a/pasp/grammar.py b/pasp/grammar.py
index 2df07bd..a3639c6 100644
--- a/pasp/grammar.py
+++ b/pasp/grammar.py
@@ -4,8 +4,18 @@ from .program import ProbFact, Query, VarQuery, ProbRule, Program, CredalFact, unique_fact, \
   Semantics, Data
 from .program import AnnotatedDisjunction, NeuralRule, NeuralAD, unique_pgrule_id
+from .graph import Graph
 
-def read(*files: str, G: lark.Lark = None, from_str: bool = False, start = "plp") -> lark.Tree:
+NODE_TYPE = 0
+NODE_REPR = 1
+NODE_VAL = 2
+NODE_CTX = 3
+NODE_LABEL = 4
+
+RULE_HEAD = 0
+RULE_BODY = 1
+
+def do_parse(*files: str, G: lark.Lark = None, from_str: bool = False, start = "plp") -> lark.Tree:
   "Read all `files` and parse them with grammar `G`, returning a single `lark.Tree`."
   if G is None:
     try:
@@ -49,6 +59,7 @@ class PreparsingTransformer(lark.Transformer):
   def __init__(self):
     super().__init__()
     self.consts = {}
+  def __default__(self, _, __, ___): return lark.visitors.Discard
   def SEMANTICS_OPT_LOGIC(self, O): return str(O)
   def SEMANTICS_OPT_PROB(self, _): return lark.visitors.Discard
@@ -58,6 +69,24 @@ def ID(self, I): return int(I)
   def constdef(self, C):
     self.consts[C[0]] = C[1]
     return lark.visitors.Discard
+
+  # def import_file(self, T):
+  #   try:
+  #     fname = T[0].value
+  #     fext = pathlib.Path(fname).suffix
+  #     if fext not in [".py", ".plp"]:
+  #       raise ValueError(f"Extension not supported for imported file {fname}.")
+  #     with open(fname, "r") as f:
+  #       fcontent = f.read()
+  #     if fext == ".py":
+  #       subTree = self.add_code(fcontent)
+  #     else:
+  #       subTree = parse(fcontent, from_str = True, semantics = self.sem)
+  #     T.children.extend(u for u in subTree.children if u not in T.children)
+  #     return lark.visitors.Discard
+  #   except Exception as ex:
+  #     raise ex
+
   "Verify which logic semantic should be used and record constant definitions."
   def plp(self, S): return S[0] if len(S) > 0 else None, self.consts
 
@@ -65,9 +94,9 @@
 class StableTransformer(lark.Transformer):
   class Pack(tuple):
     @staticmethod
-    def __new__(cls, tp: str, r: str = None, v = None, sc: dict = {}):
+    def __new__(cls, tp: str, r: str = None, v = None, sc: dict = {}, nlabel: tuple = None):
       return super(StableTransformer.Pack, cls).__new__(cls, (tp, str(v) if r is None else r, \
-             r if v is None else v, sc))
+             r if v is None else v, sc, nlabel))
 
     def __str__(self): return self[1]
     def __repr__(self): return f"<{self[0]}: {self.__str__()}>"
@@ -78,10 +107,11 @@ def __init__(self, _, consts: dict = {}):
     self.n_prules = 0
     self.consts = consts
     self.varquery_id = 0
+    self.graph = Graph()
 
   @staticmethod
-  def pack(t: str, rep: str = None, val = None, scope: dict = {}) -> tuple[str, str, str, dict]:
-    return StableTransformer.Pack(t, rep, val, scope)
+  def pack(tp: str, rep: str = None, val = None, scope: dict = {}, nlabel: tuple = None) -> tuple[str, str, str, dict, tuple]:
+    return StableTransformer.Pack(tp, rep, val, scope, nlabel)
 
   @staticmethod
   def join_scope(A: list) -> dict: return dict((y, None) for S in A for y in S[3])
@@ -168,7 +198,7 @@ def WORD(self, c): return self.pack("WORD", str(c))
   def NEG(self, n): return self.pack("NEG", str(n))
   def VAR(self, v):
     x = str(v); X = {v: None}
-    return self.pack("VAR", x, scope = X)
+    # A variable matches any ground term, so its node label is the ".*" regex.
+    return self.pack("VAR", x, ".*", scope = X)
-  def ID(self, i): return self.pack("ID", val = int(i))
+  # Follow the terminal renames in grammar.lark (ID -> INT, NUMBER -> FLOAT);
+  # the pack tags keep their old names so downstream checks still match.
+  def INT(self, i): return self.pack("ID", val = int(i))
   def OP(self, o): return self.pack("OP", str(o))
-  def REAL(self, r): return self.pack("REAL", val = float(r))
+  def FLOAT(self, r): return self.pack("REAL", val = float(r))
@@ -203,17 +233,23 @@ def set(self, S):
   def interval(self, I): return self.pack("interval", f"{I[0][2]}..{I[1][2]}", (I[0][2], I[1][2]))
 
   # Predicates.
-  def pred(self, P, replace_semicolons = False):
-    name = P[0][1]
-    rep = f"{name}({', '.join(getnths(P[1:], 1))})"
-    return self.pack("pred", rep.replace(";", ",") if replace_semicolons else rep, name, self.join_scope(P))
+  def pred(self, node, replace_semicolons = False):
+    name = node[0][1]
+    rep = f"{name}({', '.join(getnths(node[1:], 1))})"
+    # Node label for graph matching: variable arguments become ".*" wildcards.
+    nlabel = f"{name}({', '.join('.*' if child[NODE_TYPE] == 'VAR' else child[NODE_REPR] for child in node[1:])})"
+    return self.pack("pred", rep.replace(";", ",") if replace_semicolons else rep, name, self.join_scope(node), nlabel)
   def grpred(self, P): return self.pred(P)
   def query_pred(self, P): return self.pred(P, replace_semicolons = True)
 
   # Literals.
   def lit(self, P):
     s = P[0][0] != "NEG"
-    return self.pack("lit", " ".join(getnths(P, 1)), (s, P[0][2] if s else P[1][2]), self.join_scope(P))
+    expr = P[0][2] if s else P[1][2]
+    # Sign (1 for positive, -1 for negated) paired with the literal's label.
+    nlabel = (1 if s else -1, P[0][NODE_LABEL] if s else P[1][NODE_LABEL])
+    if nlabel[1] is None: nlabel = (nlabel[0], expr)
+    return self.pack("lit", " ".join(getnths(P, 1)), (s, expr), self.join_scope(P), nlabel)
   def grlit(self, P): return self.lit(P)
 
   # Binary operations.
@@ -222,27 +258,45 @@ def bop(self, B) -> str: return self.pack("bop", " ".join(getnths(B, 1)))
 
   # Facts.
   def fact(self, F):
     f = f"{''.join(getnths(F, 1))}"
+    self.graph.addVertex(f) # Facts are always ground.
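+    # Rule bodies are connected to these vertices later (in plp) by matching
+    # each literal's node label against vertex names via Graph.searchVertices.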
     return self.pack("fact", f + ".", f)
 
   def pfact(self, PF):
     p, f = PF[0][2], PF[1][1]
+    self.graph.addVertex(f)
     return self.pack("pfact", "", ProbFact(p, f))
 
   def cfact(self, CF):
     l, u, f = CF[0][2], CF[1][2], CF[2][1]
+    self.graph.addVertex(f)
     return self.pack("cfact", "", CredalFact(l, u, f))
 
   def lpfact(self, PF):
     if PF[0][0] == "prob": p, f = PF[0][2], PF[1][1]
     else: p, f = 0.5, PF[0][1]
+    self.graph.addVertex(f)
     return self.pack("pfact", "", ProbFact(p, f, learnable = True))
 
   # Heads.
-  def head(self, H): return self.pack("head", ", ".join(getnths(H, 1)), H, self.join_scope(H))
-  def ohead(self, H): return self.pack("head", H[0][1], H[0][2], H[0][3])
+  def head(self, H):
+    return self.pack("head", ", ".join(getnths(H, 1)), H, self.join_scope(H), H[0][NODE_LABEL])
+  def ohead(self, H):
+    return self.pack("head", H[0][1], H[0][2], H[0][3], H[0][NODE_LABEL])
 
   # Body.
-  def body(self, B): return self.pack("body", ", ".join(getnths(B, 1)), B, self.join_scope(B))
+  def body(self, B):
+    # Collect the node label of every literal in the body.
+    nlabels = tuple(x[NODE_LABEL] for x in B)
+    return self.pack("body", ", ".join(getnths(B, 1)), B, self.join_scope(B), nlabels)
 
   # Rules.
-  def rule(self, R): return self.pack("rule", " :- ".join(getnths(R, 1)) + ".")
+  def rule(self, R):
+    # A rule depends on its body literals, so it inherits the body's labels.
+    nlabel = R[RULE_BODY][NODE_LABEL]
+    h, b = R[RULE_HEAD], R[RULE_BODY]
+    self.graph.addVertex(f"{h[1]} :- {b[1]}")
+    return self.pack("rule", " :- ".join(getnths(R, 1)) + ".", scope = self.join_scope(R), nlabel = nlabel)
+
   def prule(self, R):
     l = "LEARN" in getnths(R, 0)
     e = "EXPAND" in getnths(R, 0)
@@ -348,10 +402,15 @@ def data(self, D):
     test, train = D[2][2], D[3][2] if len(D) > 3 else None
     return self.pack("data", f"{name}({arg}).", Data(name, arg, test, train))
 
   # Python block.
   def python(self, T):
-    exec("import torch\n\n" + T[0].value, self.torch_scope)
-    return self.pack("python", "")
+    return self.add_code(T[0].value)
+
+  def add_code(self, code):
+    exec("import torch\n\n" + code, self.torch_scope)
+    return self.pack("python", "")
 
   # Local hubconf repo.
   def LOCAL_NET(self, L): return self.pack("LOCAL_NET", str(L))
@@ -439,8 +498,12 @@ def nad(self, A):
     rep = f"{A[0][1]}::{name}({inp}, {A[3][1]}{'' if outcomes is None else f'; {A[offset-1][1]}'}) as {hub_repr} :- {', '.join(getnths(body, 1))}."
     return self.pack("nad", "", (name, inp, vals, outcomes, net, body, rep, learnable, params))
 
   # Constraint.
   def constraint(self, C): return self.pack("constraint", f":- {C[0][1]}.")
 
   # Query elements.
   def qelement(self, E):
@@ -482,7 +545,7 @@ def semantics(self, S):
     lark.visitors.Discard
 
   # Probabilistic Logic Program.
-  def plp(self, C) -> Program:
+  def plp(self, nodes) -> Program:
     # Logic Program.
     P = []
     # Probabilistic Facts.
@@ -505,15 +568,29 @@ def plp(self, C) -> Program:
     # Directives.
     directives = {}
     # Mapping.
-    M = {"pfact": PF, "prule": PR, "query": Q, "varquery": VQ, "cfact": CF, "ad": AD, "nrule": TNR,
-         "nad": TNA}
-    for t, L, O, _ in C:
-      if len(L) > 0: push(P, L)
-      if t in M: push(M[t], O)
-      if t == "data":
-        if O.name in D: D[O.name].append(O)
-        else: D[O.name] = [O]
-      if t == "directive": directives[O[0]] = tup if len(tup := O[1:]) > 1 else tup[0]
+    type_map = {"pfact": PF, "prule": PR, "query": Q, "varquery": VQ, "cfact": CF, "ad": AD,
+                "nrule": TNR, "nad": TNA}
+    for ntype, expr, obj, _, nlabel in nodes:
+      if len(expr) > 0: push(P, expr)
+      if ntype in type_map: push(type_map[ntype], obj)
+      if ntype == "data":
+        if obj.name in D: D[obj.name].append(obj)
+        else: D[obj.name] = [obj]
+      elif ntype == "directive":
+        directives[obj[0]] = tup if len(tup := obj[1:]) > 1 else tup[0]
+      elif ntype == "rule":
+        # Strip the trailing period so the name matches the vertex added by rule().
+        vertex = expr[:-1] if expr.endswith(".") else expr
+        for item in nlabel:
+          if item is None: continue
+          sign, regex = item
+          # Link the rule to every vertex whose head matches the body literal;
+          # sign is 1 for a positive literal and -1 for a negated one.
+          for adjacent in self.graph.searchVertices(regex):
+            self.graph.addEdge(vertex, adjacent, sign)
+
     # Deal with ungrounded probabilistic rules.
     for r in PR:
       if r.is_prop: PF.append(r.prop_pf)
@@ -521,7 +598,7 @@ def plp(self, C) -> Program:
     self.register_nrule(TNR, NR, D)
     self.register_nad(TNA, NA, D)
     return Program("\n".join(P), PF, PR, Q, VQ, CF, AD, NR, NA, semantics = self.sem, \
-                   directives = directives)
+                   directives = directives, graph = self.graph)
 
 class PartialTransformer(StableTransformer):
   def __init__(self, sem: str, consts: dict = {}):
@@ -626,24 +707,27 @@ def plp(self, C: list[tuple]) -> Program:
     P.extend(f"_{x} :- {x}." for x in self.PT)
     self.check_data(D)
     self.register_nrule(TNR, NR, D)
     self.register_nad(TNA, NA, D)
     return Program("\n".join(P), PF, PR, Q, VQ, CF, AD, NR, NA, semantics = self.sem, \
-                   stable_p = self.stable_p, directives = directives)
+                   stable_p = self.stable_p, directives = directives, graph = self.graph)
 
   def transform(self, tree):
     self.o_tree = tree
     self.stable_p = StableTransformer(self.sem).transform(tree)
     return super().transform(tree)
 
-def parse(*files: str, G: lark.Lark = None, from_str: bool = False, semantics: str = "stable") -> Program:
+def parse(*files: str, grammar: lark.Lark = None, from_str: bool = False, semantics: str = "stable") -> Program:
   """Either parses `streams` as blocks of text containing the PLP when `from_str = True`, or
   interprets `streams` as filenames to be read and parsed into a `Program`."""
   if semantics not in parse.trans_map:
     raise ValueError("semantics not supported (must either be 'stable', 'partial' or 'lstable')!")
-  T = read(*files, G = G, from_str = from_str)
-  sem, consts = PreparsingTransformer().transform(T)
+  tree = do_parse(*files, G = grammar, from_str = from_str)
+  sem, consts = PreparsingTransformer().transform(tree)
   if sem is not None: semantics = sem
-  return parse.trans_map[semantics](semantics, consts).transform(T)
+  return parse.trans_map[semantics](semantics, consts).transform(tree)
 
 parse.trans_map = {}
 parse.trans_map["stable"] = StableTransformer
 parse.trans_map["lstable"] = PartialTransformer
diff --git a/pasp/graph.py b/pasp/graph.py
new file mode 100644
index 0000000..203c0c0
--- /dev/null
+++ b/pasp/graph.py
@@ -0,0 +1,146 @@
+# Python program to detect cycles in a directed graph.
+from collections import defaultdict
+import re
+
+INFINITE = float('inf')
+
+# This class represents a directed graph using an adjacency list
+# representation. Edges are signed: 1 marks a positive dependency and -1 a
+# dependency through negation.
+class Graph:
+
+  def __init__(self):
+    # Default dictionary mapping each vertex to its (neighbor, sign) list.
+    self.edges = defaultdict(list)
+    # List of vertices.
+    self.vertices = []
+
+  def searchVertices(self, pattern):
+    "Return all vertices whose head (the text before ':-') matches `pattern`."
+    regex = re.compile(pattern)
+    result = []
+    for v in self.vertices:
+      idx = v.find(":-")
+      label = v if idx == -1 else v[:idx]
+      if regex.search(label):
+        result.append(v)
+    return result
+
+  def addVertex(self, v):
+    if v not in self.vertices:
+      self.vertices.append(v)
+
+  # Function to add a directed edge to the graph.
+  def addEdge(self, v, w, sign = 1):
+    # Add w to v's adjacency list.
+    self.edges[v].append((w, sign))
+
+  # A recursive function that uses visit[] and a recursion stack to detect a
+  # cycle through positive edges in the subgraph reachable from vertex v.
+  def isCyclicUtil(self, v, visit, inStack):
+    # Mark the current node as visited and push it onto the recursion stack.
+    visit[v] = True
+    inStack[v] = True
+
+    # Recur for all the vertices adjacent to this vertex, following only
+    # positive edges.
+    for vertex, sign in self.edges[v]:
+      if sign == -1:
+        continue
+      # A neighbor on the recursion stack (v itself included) closes a cycle.
+      if inStack[vertex]:
+        return True
+      if not visit[vertex] and self.isCyclicUtil(vertex, visit, inStack):
+        return True
+
+    inStack[v] = False
+    return False
+
+  # Returns true if the graph contains a positive cycle, else false.
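+  # A "positive" cycle is a dependency cycle that never follows an edge
+  # introduced by `not`; cycles that do go through negation are handled by
+  # the Bellman-Ford check further below, which treats each negated
+  # dependency as an edge of weight -1.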
+  def isPositiveCyclic(self):
+    # Mark all the vertices as not visited and not on the recursion stack.
+    visit = {k: False for k in self.vertices}
+    inStack = {k: False for k in self.vertices}
+
+    # Call the recursive helper on every unvisited vertex to detect a
+    # positive cycle in each DFS tree.
+    for vertex, visited in visit.items():
+      # Don't recur for a vertex that has already been visited.
+      if not visited and self.isCyclicUtil(vertex, visit, inStack):
+        return True
+
+    return False
+
+  def isNegCycleBellmanFord(self, src, visit):
+    V = len(self.vertices)
+
+    # Step 1: Initialize distances from src to all other vertices as INFINITE.
+    dist = {k: INFINITE for k in self.vertices}
+    dist[src] = 0
+
+    # Step 2: Relax all edges |V| - 1 times, since a simple shortest path
+    # from src to any other vertex has at most |V| - 1 edges.
+    for _ in range(V - 1):
+      for u, adjacencies in self.edges.items():
+        for v, sign in adjacencies:
+          if dist[u] != INFINITE and dist[u] + sign < dist[v]:
+            dist[v] = dist[u] + sign
+            if not visit[v]:
+              visit[v] = True
+
+    # Step 3: Check for negative-weight cycles. The step above guarantees
+    # shortest distances if the graph doesn't contain a negative-weight
+    # cycle; if we still get a shorter path, there is one.
+    for u, adjacencies in self.edges.items():
+      for v, sign in adjacencies:
+        if dist[u] != INFINITE and dist[u] + sign < dist[v]:
+          return True
+
+    return False
+
+  def isNegativeCyclic(self):
+    # Mark all the vertices as not visited.
+    visit = {k: False for k in self.vertices}
+
+    # Call Bellman-Ford from every vertex not yet reached by a previous run.
+    for vertex, visited in visit.items():
+      if not visited:
+        visit[vertex] = True
+        if self.isNegCycleBellmanFord(vertex, visit):
+          return True
+
+    return False
\ No newline at end of file
diff --git a/pasp/program.py b/pasp/program.py
index 0fe0d38..5722c16 100644
--- a/pasp/program.py
+++ b/pasp/program.py
@@ -355,7 +355,7 @@ class Program:
   def __init__(self, P: str, PF: list[ProbFact], PR: list[ProbRule], Q: list[Query], \
                VQ: list[VarQuery], CF: list[CredalFact], AD: list[AnnotatedDisjunction], \
                NR: list[NeuralRule], NA: list[NeuralAD], semantics: Semantics = Semantics.STABLE, \
-               stable_p = None, directives: list = None):
+               stable_p = None, directives: list = None, graph = None):
     """
     Constructs a PLP out of a logic program `P`, probabilistic facts `PF`, credal facts `CF`
     and queries `Q`.
@@ -369,6 +369,7 @@ def __init__(self, P: str, PF: list[ProbFact], PR: list[ProbRule], Q: list[Query
     self.AD = AD
     self.NR = NR
     self.NA = NA
+    self.graph = graph
 
     # Number of instances in data.
     self.m_test = 0
@@ -417,6 +418,9 @@ def __str__(self) -> str:
   def __repr__(self) -> str: return self.__str__()
 
   def __call__(self, **kwargs):
+    self.run(**kwargs)
+
+  def run(self, **kwargs):
     if self.directives is not None:
       if "learn" in self.directives:
         f, A = self.directives["learn"]
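
Usage sketch (illustrative, not part of the patch): once the patch is applied,
the dependency graph hangs off the parsed Program, so the cycle checks can be
run directly. The file examples/cyclic.plp added above contains cycles through
negation.

    import pasp

    # Parsing registers every fact and rule as a vertex and adds a signed
    # edge for each body literal matched against the existing vertices.
    prog = pasp.parse("examples/cyclic.plp", semantics = "stable")

    print(prog.graph.isPositiveCyclic())  # DFS over positive (+1) edges only.
    print(prog.graph.isNegativeCyclic())  # Bellman-Ford over signed edges.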