From 4ca55fe9f19c3fd41a6ecbc80629b76e72d900a2 Mon Sep 17 00:00:00 2001 From: Brandl Date: Fri, 18 Oct 2024 16:59:36 +0200 Subject: [PATCH] fixed --- docs/blog/posts/2024-10-14-plan-and-exec.md | 1 - src/graphs/plan_and_execute.py | 4 - src/graphs/plan_and_execute_with_notes.py | 113 ++++++++++++++++++++ src/plan_and_execute_and_scribe.py | 2 +- 4 files changed, 114 insertions(+), 6 deletions(-) create mode 100644 src/graphs/plan_and_execute_with_notes.py diff --git a/docs/blog/posts/2024-10-14-plan-and-exec.md b/docs/blog/posts/2024-10-14-plan-and-exec.md index c5f576b..91900d9 100644 --- a/docs/blog/posts/2024-10-14-plan-and-exec.md +++ b/docs/blog/posts/2024-10-14-plan-and-exec.md @@ -1,7 +1,6 @@ --- authors: - andreashappe - - brandl date: 2024-10-14 categories: - 'initial-journey' diff --git a/src/graphs/plan_and_execute.py b/src/graphs/plan_and_execute.py index 0743fe8..2c6195c 100644 --- a/src/graphs/plan_and_execute.py +++ b/src/graphs/plan_and_execute.py @@ -35,9 +35,6 @@ You have currently done the follow steps: {past_steps} -Your notes are: -{notes} - Update your plan accordingly. If no more steps are needed and you can return to the user, then respond with that. Otherwise, fill out the plan. Only add steps to the plan that still NEED to be done. Do not return previously done steps as part of the plan. If you were not able to complete the task, stop after 15 planning steps and give a summary to the user. 
"""Plan-and-execute agent graph that additionally carries free-form notes.

Variant of ``plan_and_execute.py`` whose state has an extra ``notes``
field; the notes are injected into the re-planner prompt so previously
recorded observations can influence the updated plan.
"""

import operator

from typing import Annotated, List, Tuple, Union
from typing_extensions import TypedDict
from pydantic import BaseModel, Field

from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, START, END

### Prompts

# Initial planner: turns the user objective into a step-by-step plan.
planner_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """For the given objective, come up with a simple step by step plan. \
This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. \
The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps.""",
        ),
        ("placeholder", "{messages}"),
    ]
)

# Re-planner: revises the remaining plan after each executed step.
# NOTE: in addition to the base prompt it also receives {notes}.
replanner_prompt = ChatPromptTemplate.from_template(
    """For the given objective, come up with a simple step by step plan. \
This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. \
The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps.

Your objective was this:
{input}

Your original plan was this:
{plan}

You have currently done the follow steps:
{past_steps}

Your notes are:
{notes}

Update your plan accordingly. If no more steps are needed and you can return to the user, then respond with that. Otherwise, fill out the plan. Only add steps to the plan that still NEED to be done. Do not return previously done steps as part of the plan.

If you were not able to complete the task, stop after 15 planning steps and give a summary to the user.
"""
)

### Data Structures: Our State Structure
class PlanExecute(TypedDict):
    input: str  # the initial user-given objective
    plan: List[str]  # steps still to be executed
    past_steps: Annotated[List[Tuple], operator.add]  # accumulated step history
    notes: str  # free-form notes surfaced to the re-planner
    response: str  # response from the agent to the user

### Data Structures: LLM Responses
class Plan(BaseModel):
    """Plan to follow in future"""

    steps: List[str] = Field(
        description="different steps to follow, should be in sorted order"
    )

class Response(BaseModel):
    """Response to user."""
    response: str

class Act(BaseModel):
    """Action to perform."""

    action: Union[Response, Plan] = Field(
        description="Action to perform. If you want to respond to user, use Response. "
        "If you need to further use tools to get the answer, use Plan."
    )

### create the graph

def create_plan_and_execute_graph(llm, execute_step):
    """Build the planner -> agent -> replanner StateGraph.

    llm: chat model supporting ``.with_structured_output(...)``.
    execute_step: node callable that executes the next plan step; it is
        presumably the node that writes ``notes`` into the state — TODO confirm.
    Returns the (uncompiled) StateGraph.
    """

    def should_end(state: PlanExecute):
        # Finish once the re-planner produced a final user-facing response;
        # otherwise loop back to the executing agent.
        if "response" in state and state["response"]:
            return END
        else:
            return "agent"

    def plan_step(state: PlanExecute):
        planner = planner_prompt | llm.with_structured_output(Plan)
        plan = planner.invoke({"messages": [("user", state["input"])]})
        return {"plan": plan.steps}

    def replan_step(state: PlanExecute):
        replanner = replanner_prompt | llm.with_structured_output(Act)
        # BUGFIX: the template requires a {notes} value, but before the
        # executor has recorded any notes the key may be absent from the
        # LangGraph state; invoking with the raw state would then raise
        # KeyError. Pass an explicit mapping with a safe default instead.
        output = replanner.invoke(
            {
                "input": state["input"],
                "plan": state["plan"],
                "past_steps": state["past_steps"],
                "notes": state.get("notes", ""),
            }
        )
        if isinstance(output.action, Response):
            return {"response": output.action.response}
        else:
            return {"plan": output.action.steps}

    workflow = StateGraph(PlanExecute)

    # Add the nodes
    workflow.add_node("planner", plan_step)
    workflow.add_node("agent", execute_step)
    workflow.add_node("replan", replan_step)

    # set the start node
    workflow.add_edge(START, "planner")

    # configure links between nodes
    workflow.add_edge("planner", "agent")
    workflow.add_edge("agent", "replan")

    # replan either ends the run (END) or loops back to "agent"
    workflow.add_conditional_edges("replan", should_end)

    return workflow