Showing 4 changed files with 114 additions and 6 deletions.
@@ -1,7 +1,6 @@
---
authors:
  - andreashappe
  - brandl
date: 2024-10-14
categories:
  - 'initial-journey'
@@ -0,0 +1,113 @@
import operator

from typing import Annotated, List, Tuple, Union
from typing_extensions import TypedDict
from pydantic import BaseModel, Field

from langchain_core.prompts import ChatPromptTemplate
from langgraph.graph import StateGraph, START, END

### Prompts

planner_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """For the given objective, come up with a simple step by step plan. \
This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. \
The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps.""",
        ),
        ("placeholder", "{messages}"),
    ]
)

replanner_prompt = ChatPromptTemplate.from_template(
    """For the given objective, come up with a simple step by step plan. \
This plan should involve individual tasks, that if executed correctly will yield the correct answer. Do not add any superfluous steps. \
The result of the final step should be the final answer. Make sure that each step has all the information needed - do not skip steps.
Your objective was this:
{input}
Your original plan was this:
{plan}
You have currently done the following steps:
{past_steps}
Your notes are:
{notes}
Update your plan accordingly. If no more steps are needed and you can return to the user, then respond with that. Otherwise, fill out the plan. Only add steps to the plan that still NEED to be done. Do not return previously done steps as part of the plan.
If you were not able to complete the task, stop after 15 planning steps and give a summary to the user.
"""
)

### Data Structures: Our State Structure
class PlanExecute(TypedDict):
    input: str  # the initial user-given objective
    plan: List[str]
    past_steps: Annotated[List[Tuple], operator.add]
    notes: str
    response: str  # response from the agent to the user

### Data Structures: LLM Responses
class Plan(BaseModel):
    """Plan to follow in future"""

    steps: List[str] = Field(
        description="different steps to follow, should be in sorted order"
    )

class Response(BaseModel):
    """Response to user."""
    response: str

class Act(BaseModel):
    """Action to perform."""

    action: Union[Response, Plan] = Field(
        description="Action to perform. If you want to respond to user, use Response. "
        "If you need to further use tools to get the answer, use Plan."
    )

### create the graph

def create_plan_and_execute_graph(llm, execute_step):

    def should_end(state: PlanExecute):
        if "response" in state and state["response"]:
            return END
        else:
            return "agent"

    def plan_step(state: PlanExecute):
        planner = planner_prompt | llm.with_structured_output(Plan)
        plan = planner.invoke({"messages": [("user", state["input"])]})
        return {"plan": plan.steps}

    def replan_step(state: PlanExecute):
        replanner = replanner_prompt | llm.with_structured_output(Act)
        output = replanner.invoke(state)
        if isinstance(output.action, Response):
            return {"response": output.action.response}
        else:
            return {"plan": output.action.steps}

    workflow = StateGraph(PlanExecute)

    # Add the nodes
    workflow.add_node("planner", plan_step)
    workflow.add_node("agent", execute_step)
    workflow.add_node("replan", replan_step)

    # set the start node
    workflow.add_edge(START, "planner")

    # configure links between nodes
    workflow.add_edge("planner", "agent")
    workflow.add_edge("agent", "replan")
    workflow.add_conditional_edges("replan", should_end)

    return workflow
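
The module above only builds the graph: callers still have to supply an LLM, provide an execute_step node, and compile the workflow themselves. Below is a minimal usage sketch, assuming it runs alongside the code above; the ChatOpenAI model and the bare-bones execute_step are illustrative assumptions and are not part of this commit.

# Usage sketch (illustrative, not part of this commit): compile the graph with
# an assumed ChatOpenAI model and a minimal execute_step node.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4o", temperature=0)

def execute_step(state: PlanExecute):
    # Hypothetical executor: hand the first open plan step to the bare LLM and
    # record the result; a real agent would call tools here instead.
    task = state["plan"][0]
    result = llm.invoke(f"Objective: {state['input']}\nPerform this step: {task}")
    return {"past_steps": [(task, result.content)], "notes": ""}

app = create_plan_and_execute_graph(llm, execute_step).compile()

# Stream node outputs until should_end routes the replanner to END.
for event in app.stream({"input": "What is the capital of Austria?"}):
    print(event)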