fixed
Brandl committed Oct 18, 2024
1 parent fd5e11e commit 4ca55fe
Showing 4 changed files with 114 additions and 6 deletions.
1 change: 0 additions & 1 deletion docs/blog/posts/2024-10-14-plan-and-exec.md
@@ -1,7 +1,6 @@
---
authors:
- andreashappe
- brandl
date: 2024-10-14
categories:
- 'initial-journey'
4 changes: 0 additions & 4 deletions src/graphs/plan_and_execute.py
@@ -35,9 +35,6 @@
You have currently done the following steps:
{past_steps}
Your notes are:
{notes}
Update your plan accordingly. If no more steps are needed and you can return to the user, then respond with that. Otherwise, fill out the plan. Only add steps to the plan that still NEED to be done. Do not return previously done steps as part of the plan.
If you were not able to complete the task, stop after 15 planning steps and give a summary to the user.
@@ -49,7 +46,6 @@ class PlanExecute(TypedDict):
    input: str # the initial user-given objective
    plan: List[str]
    past_steps: Annotated[List[Tuple], operator.add]
    notes: str
    response: str # response from the agent to the user

### Data Structures: LLM Responses
113 changes: 113 additions & 0 deletions src/graphs/plan_and_execute_with_notes.py
@@ -0,0 +1,113 @@
import operator

from typing import Annotated, List, Tuple, Union
from typing_extensions import TypedDict
from pydantic import BaseModel, Field

from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, START, END

### Prompts

planner_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """For the given objective, come up with a simple step by step plan. \
This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. \
The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps.""",
        ),
        ("placeholder", "{messages}"),
    ]
)

replanner_prompt = ChatPromptTemplate.from_template(
"""For the given objective, come up with a simple step by step plan. \
This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. \
The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps.
Your objective was this:
{input}
Your original plan was this:
{plan}
You have currently done the following steps:
{past_steps}
Your notes are:
{notes}
Update your plan accordingly. If no more steps are needed and you can return to the user, then respond with that. Otherwise, fill out the plan. Only add steps to the plan that still NEED to be done. Do not return previously done steps as part of the plan.
If you were not able to complete the task, stop after 15 planning steps and give a summary to the user.
"""
)

### Data Structures: Our State Structure
class PlanExecute(TypedDict):
    input: str # the initial user-given objective
    plan: List[str]
    past_steps: Annotated[List[Tuple], operator.add]
    notes: str
    response: str # response from the agent to the user

### Data Structures: LLM Responses
class Plan(BaseModel):
"""Plan to follow in future"""

steps: List[str] = Field(
description="different steps to follow, should be in sorted order"
)

class Response(BaseModel):
"""Response to user."""
response: str

class Act(BaseModel):
"""Action to perform."""

action: Union[Response, Plan] = Field(
description="Action to perform. If you want to respond to user, use Response. "
"If you need to further use tools to get the answer, use Plan."
)

### create the graph

def create_plan_and_execute_graph(llm, execute_step):

    def should_end(state: PlanExecute):
        if "response" in state and state["response"]:
            return END
        else:
            return "agent"

    def plan_step(state: PlanExecute):
        planner = planner_prompt | llm.with_structured_output(Plan)
        plan = planner.invoke({"messages": [("user", state["input"])]})
        return {"plan": plan.steps}

    def replan_step(state: PlanExecute):
        replanner = replanner_prompt | llm.with_structured_output(Act)
        output = replanner.invoke(state)
        if isinstance(output.action, Response):
            return {"response": output.action.response}
        else:
            return {"plan": output.action.steps}

    workflow = StateGraph(PlanExecute)

    # Add the nodes
    workflow.add_node("planner", plan_step)
    workflow.add_node("agent", execute_step)
    workflow.add_node("replan", replan_step)

    # set the start node
    workflow.add_edge(START, "planner")

    # configure links between nodes
    workflow.add_edge("planner", "agent")
    workflow.add_edge("agent", "replan")
    workflow.add_conditional_edges("replan", should_end)

    return workflow
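
The module above only defines the graph factory; compiling and running it is left to the caller. The following is a minimal usage sketch, not part of the commit: llm stands for any LangChain chat model that supports structured output, execute_step is a caller-supplied node (here a trivial placeholder), and the model name is an assumption.

# Illustrative wiring only, not taken from the repository.
from langchain_openai import ChatOpenAI  # assumed provider; any chat model with structured output works
from graphs.plan_and_execute_with_notes import create_plan_and_execute_graph, PlanExecute

def execute_step(state: PlanExecute):
    # Placeholder executor: a real one would carry out the next plan step with
    # tools and record observations; here the step is simply marked as done.
    step = state["plan"][0]
    return {"past_steps": [(step, "done")], "notes": state.get("notes", "")}

llm = ChatOpenAI(model="gpt-4o", temperature=0)  # assumed model name
app = create_plan_and_execute_graph(llm, execute_step).compile()

result = app.invoke(
    {"input": "Summarize the three largest files in /var/log", "notes": ""},
    config={"recursion_limit": 30},
)
print(result["response"])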
2 changes: 1 addition & 1 deletion src/plan_and_execute_and_scribe.py
@@ -11,7 +11,7 @@
from helper.ui import print_event
from tools.ssh import get_ssh_connection_from_env, SshTestCredentialsTool, SshExecuteTool
from graphs.execute_with_scribe import create_chat_tool_scribe_agent_graph
from graphs.plan_and_execute import create_plan_and_execute_graph
from graphs.plan_and_execute_with_notes import create_plan_and_execute_graph

# setup configuration from environment variables
load_dotenv()