In essay-writer.ipynb you can run the Essay Writer and check how it behaves, and the corresponding implementation can be found in lambda_function.py. The Essay Writer from deeplearning.ai uses a LangGraph workflow to generate an essay that fits a given topic. The difference from reflection-agent.md is that the reflection agent updates the LLM-generated essay through reflection alone, without any external search, whereas the Essay Writer uses reflection to update the keywords used for internet search. Since the result depends on the quality and quantity of the retrieved data, this example is meant for understanding the workflow rather than for comparing performance.
The activity diagram of the Essay Writer is shown below.
The State class for LangGraph is defined as follows.
from typing import TypedDict, List

class State(TypedDict):
    task: str
    plan: List[str]
    essay: str
    critique: str
    content: List[str]
    revision_number: int
    max_revisions: int
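For reference, the state that flows between the nodes is just a dictionary matching this schema. A rough sketch of an initial state (the values here are hypothetical; in practice only task needs to be supplied, as in the execution example later):

# Hypothetical initial state; the nodes fill in the remaining fields as the graph runs.
initial_state: State = {
    "task": "How to live joyfully",
    "plan": [],
    "essay": "",
    "critique": "",
    "content": [],
    "revision_number": 1,
    "max_revisions": 2,
}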
The Plan, which describes the outline of the essay, is prepared as shown below. The Plan class is used when extracting the plan with with_structured_output.
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage

class Plan(BaseModel):
    """List of session topics and outline as a json format"""
    steps: List[str] = Field(
        description="different sessions to follow, should be in sorted order without numbers. Each session has detailed description"
    )
def get_planner():
    system = """You are an expert writer tasked with writing a high level outline of an essay. \
Write such an outline for the user provided topic. Give an outline of the essay along with any relevant notes \
or instructions for the sections. \
Make sure that each session has all the information needed."""

    planner_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system),
            ("placeholder", "{messages}"),
        ]
    )
    chat = get_chat()
    planner = planner_prompt | chat
    return planner
def plan(state: State):
    print("###### plan ######")
    print('task: ', state["task"])

    task = [HumanMessage(content=state["task"])]

    planner = get_planner()
    response = planner.invoke({"messages": task})
    print('response.content: ', response.content)

    chat = get_chat()
    structured_llm = chat.with_structured_output(Plan, include_raw=True)
    info = structured_llm.invoke(response.content)
    print('info: ', info)

    if info['parsed'] is not None:
        parsed_info = info['parsed']
        # print('parsed_info: ', parsed_info)
        print('steps: ', parsed_info.steps)
        return {
            "task": state["task"],
            "plan": parsed_info.steps
        }
    else:
        print('parsing_error: ', info['parsing_error'])
        return {"plan": []}
The data obtained at this step has the following form.
{'task': 'How to live joyfully',
 'plan': ['A brief explanation of the definition and importance of happiness, and an introduction of the topic',
  'Ways to maintain a positive mindset - turning negative thoughts into positive ones, a habit of gratitude, the importance of optimism and hopeful thinking',
  'Ways to lead a balanced life - balancing work and rest, exercise and healthy eating habits, hobbies and leisure activities',
  'Ways to value relationships - strengthening ties with family, friends, and colleagues, consideration and understanding of others, building social bonds',
  'Ways to pursue self-development and growth - learning new things and taking on challenges, setting long-term goals and working toward them, the value of spiritual and mental growth',
  'A summary of the main points and an emphasis on the need for an integrated approach to a joyful life']}
To generate keywords for the web search, the Queries class is defined and then the queries are extracted as shown below.
from langchain_core.pydantic_v1 import BaseModel
from langchain_community.tools.tavily_search import TavilySearchResults

class Queries(BaseModel):
    queries: List[str]

def research_plan(state: State):
    task = state['task']
    print('task: ', task)

    system = """You are a researcher charged with providing information that can \
be used when writing the following essay. Generate a list of search queries that will gather \
any relevant information. Only generate 3 queries max."""

    research_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system),
            ("human", "{task}"),
        ]
    )
    chat = get_chat()
    research = research_prompt | chat
    response = research.invoke({"task": task})
    print('response.content: ', response.content)

    chat = get_chat()
    structured_llm = chat.with_structured_output(Queries, include_raw=True)
    info = structured_llm.invoke(response.content)
    # print('info: ', info)

    content = state["content"] if state.get("content") is not None else []

    if info['parsed'] is not None:
        queries = info['parsed']
        print('queries: ', queries.queries)

        search = TavilySearchResults(k=2)
        for q in queries.queries:
            response = search.invoke(q)
            # print('response: ', response)
            for r in response:
                content.append(r['content'])

    return {
        "task": state['task'],
        "plan": state['plan'],
        "content": content,
    }
The keywords obtained for the queries are shown below. The web search results are stored in content and used later in generation.
queries: ['ways to live a happy life', 'cultivating a positive mindset', 'tips for finding happiness in daily life']
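Each TavilySearchResults call returns a list of result dictionaries, and only the content field of each result is appended to the shared content list. A small illustrative sketch of that accumulation, reusing one of the queries above (the exact result fields may vary by version):

# Illustrative only: run one generated query and keep just the text snippets.
search = TavilySearchResults(max_results=2)
results = search.invoke("tips for finding happiness in daily life")  # list of dicts with 'url', 'content', ...
snippets = [r["content"] for r in results]                           # these are appended to state["content"]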
generation() writes the essay using the plan and the content gathered by the web search.
def generation(state: State):
    content = "\n\n".join(state['content'] or [])

    system = """You are an essay assistant tasked with writing excellent 5-paragraph essays.\
Generate the best essay possible for the user's request and the initial outline. \
If the user provides critique, respond with a revised version of your previous attempts. \
Utilize all the information below as needed:

<content>
{content}
</content>
"""
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system),
            ("human", "{task}\n\nHere is my plan:\n\n{plan}"),
        ]
    )
    chat = get_chat()
    chain = prompt | chat

    response = chain.invoke({
        "content": content,
        "task": state['task'],
        "plan": state['plan']
    })

    revision_number = state["revision_number"] if state.get("revision_number") is not None else 1
    return {
        "essay": response,
        "revision_number": revision_number + 1
    }
The generated essay looks like the following.
{'essay': AIMessage(content='A Guide to Living a Happy Life\n\nEveryone dreams of a happy life, but actually feeling happy is not easy. Happiness means satisfaction and fulfillment in life that goes beyond material abundance or momentary pleasure. True happiness requires attention to our mindset and way of living.\n\nFirst, cultivating a positive mindset is important. Rather than being caught up in negative thoughts and emotions, we should adopt an optimistic and hopeful perspective. Build a habit of being grateful for small everyday joys, and try to find the positive side even in difficult situations.\n\nSecond, maintaining a balanced rhythm of life is the foundation of happiness. Keep an appropriate balance between work and rest, and take care of your body and mind through exercise and healthy eating habits. Hobbies and leisure activities also provide fun and vitality.\n\nThird, an attitude of valuing relationships is necessary. Strengthen emotional bonds with family, friends, and colleagues, and try to understand and care for one another. A sense of connection with others is a wellspring that deepens happiness.\n\nFourth, do not miss opportunities for self-development and growth. Learning new things, taking on challenges, and working toward long-term goals bring a sense of achievement and reward. Spiritual and mental growth is also an important guide on the way to happiness.\n\nFinally, happiness becomes possible only when all of these elements are in harmony. When a positive mindset, a balanced life, relationships, and self-development are pursued in an integrated way, true happiness will stay with us. Start the journey toward a happy life today with small practices.', additional_kwargs={'usage': {'prompt_tokens': 2806, 'completion_tokens': 826, 'total_tokens': 3632}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, response_metadata={'usage': {'prompt_tokens': 2806, 'completion_tokens': 826, 'total_tokens': 3632}, 'stop_reason': 'end_turn', 'model_id': 'anthropic.claude-3-sonnet-20240229-v1:0'}, id='run-8490a66f-b9bc-4192-b6c6-7f0e34ebeb91-0', usage_metadata={'input_tokens': 2806, 'output_tokens': 826, 'total_tokens': 3632}),
 'revision_number': 2}
The generated essay is then used to produce a critique.
def reflection(state: State):
    """You are a teacher grading an essay submission. \
Generate critique and recommendations for the user's submission. \
Provide detailed recommendations, including requests for length, depth, style, etc."""

    reflection_prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "As a teacher, you are grading a student's essay. Explain your critique and suggested improvements kindly. "
                "Provide sufficient information about strengths, weaknesses, length, depth, style, and so on. "
                "Include enough relevant examples so that each paragraph is at least 200 characters long.",
            ),
            ("human", "{essay}"),
        ]
    )
    chat = get_chat()
    reflect = reflection_prompt | chat

    res = reflect.invoke({"essay": state['essay'].content})
    response = HumanMessage(content=res.content)

    return {
        "critique": response,
        "revision_number": int(state['revision_number'])
    }
The resulting critique is shown below.
'Overall, the essay presents good guidelines for living a happy life. Here is feedback on several strengths and areas for improvement.\n\nStrengths:\n\n1. It defines the meaning of happiness well, explaining it as satisfaction and fulfillment in life that goes beyond material abundance or momentary pleasure.\n\n2. It presents concrete elements of happiness (a positive mindset, a balanced life, relationships, self-development).\n\n3. It suggests concrete practices for each element, which makes it practical. For example, it mentions specific methods such as "a habit of being grateful for small everyday joys" and "exercise and healthy eating habits".\n\n4. The final part emphasizes that the elements of happiness must be in harmony, presenting a balanced perspective.\n\nAreas for improvement:\n\n1. Each paragraph is somewhat short. Adding more concrete examples or explanations for each element would improve understanding.\n\n2. The explanation of concrete ways to cultivate a positive mindset (e.g. meditation, positive self-talk) is insufficient.\n\n3. In the section on relationships, the importance of community and social bonds beyond family and friends could also be mentioned.\n\n4. The explanation of concrete methods of self-development and growth (e.g. learning new skills, volunteering) is insufficient.\n\n5. It would be good to add a mention of factors that hinder happiness (e.g. stress, negative habits) and advice on how to overcome them.\n\nOverall, the essay presents good guidelines for a happy life, but it would be even more helpful if it supplemented concrete examples and explanations for each element and emphasized a balanced perspective.'
The critique is used to generate new search keywords, and content is updated through a web search with Tavily.
def research_critique(state: State):
    system = """You are a researcher charged with providing information that can \
be used when making any requested revisions (as outlined below). \
Generate a list of search queries that will gather any relevant information. Only generate 3 queries max."""

    critique_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system),
            ("human", "{critique}"),
        ]
    )
    chat = get_chat()
    critique = critique_prompt | chat

    response = critique.invoke({"critique": state['critique']})
    print('response.content: ', response.content)

    chat = get_chat()
    structured_llm = chat.with_structured_output(Queries, include_raw=True)
    info = structured_llm.invoke(response.content)
    # print('info: ', info)

    content = state["content"] if state.get("content") is not None else []

    if info['parsed'] is not None:
        queries = info['parsed']
        print('queries: ', queries.queries)

        search = TavilySearchResults(k=2)
        for q in queries.queries:
            response = search.invoke(q)
            # print('response: ', response)
            for r in response:
                content.append(r['content'])

    return {
        "content": content,
        "revision_number": int(state['revision_number'])
    }
should_continue() keeps the loop running until max_revisions is exceeded.
def should_continue(state, config):
    max_revisions = config.get("configurable", {}).get("max_revisions", MAX_REVISIONS)
    print("max_revisions: ", max_revisions)

    if state["revision_number"] > max_revisions:
        return "end"
    return "continue"
The graph for the workflow is prepared as follows.
workflow = StateGraph(State)
workflow.add_node("planner", plan)
workflow.add_node("generation", generation)
workflow.add_node("reflection", reflection)
workflow.add_node("research_plan", research_plan)
workflow.add_node("research_critique", research_critique)
workflow.set_entry_point("planner")
workflow.add_conditional_edges(
    "generation",
    should_continue,
    {
        "end": END,
        "continue": "reflection"
    }
)
workflow.add_edge("planner", "research_plan")
workflow.add_edge("research_plan", "generation")
workflow.add_edge("reflection", "research_critique")
workflow.add_edge("research_critique", "generation")
app = workflow.compile()
The resulting graph can be drawn as shown below.
from IPython.display import Image, display
display(Image(app.get_graph().draw_mermaid_png()))
Run it as shown below.
inputs = {"task": "How to live joyfully"}

config = {
    "recursion_limit": 50,
    "configurable": {"max_revisions": 2},
}

for output in app.stream(inputs, config=config):
    for key, value in output.items():
        print(f"Finished: {key}")

print("Final: ", value["essay"])
The execution result is shown below.
The run traced in LangSmith is shown below; the whole run took 138 seconds.
The contents of the Essay Writer (the deeplearning.ai notebook version) are summarized below.
The structure of the overall graph is shown below.
First, the classes and prompts are defined.
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated, List
import operator
from langgraph.checkpoint.sqlite import SqliteSaver
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, AIMessage, ChatMessage
memory = SqliteSaver.from_conn_string(":memory:")
class AgentState(TypedDict):
    task: str
    plan: str
    draft: str
    critique: str
    content: List[str]
    revision_number: int
    max_revisions: int
PLAN_PROMPT = """You are an expert writer tasked with writing a high level outline of an essay. \
Write such an outline for the user provided topic. Give an outline of the essay along with any relevant notes \
or instructions for the sections."""
WRITER_PROMPT = """You are an essay assistant tasked with writing excellent 5-paragraph essays.\
Generate the best essay possible for the user's request and the initial outline. \
If the user provides critique, respond with a revised version of your previous attempts. \
Utilize all the information below as needed:
------
{content}"""
REFLECTION_PROMPT = """You are a teacher grading an essay submission. \
Generate critique and recommendations for the user's submission. \
Provide detailed recommendations, including requests for length, depth, style, etc."""
RESEARCH_PLAN_PROMPT = """You are a researcher charged with providing information that can \
be used when writing the following essay. Generate a list of search queries that will gather \
any relevant information. Only generate 3 queries max."""
RESEARCH_CRITIQUE_PROMPT = """You are a researcher charged with providing information that can \
be used when making any requested revisions (as outlined below). \
Generate a list of search queries that will gather any relevant information. Only generate 3 queries max."""
Next, the classes and node functions are defined.
from langchain_core.pydantic_v1 import BaseModel

class Queries(BaseModel):
    queries: List[str]

from tavily import TavilyClient
import os

tavily = TavilyClient(api_key=os.environ["TAVILY_API_KEY"])
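The node functions below call a chat model through the name model, which is assumed to be initialized earlier in the notebook; one possible (hypothetical) initialization is sketched here:

# Hypothetical model setup; the original notebook may use a different model or provider.
from langchain_openai import ChatOpenAI
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)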
def plan_node(state: AgentState):
    messages = [
        SystemMessage(content=PLAN_PROMPT),
        HumanMessage(content=state['task'])
    ]
    response = model.invoke(messages)
    return {"plan": response.content}

def research_plan_node(state: AgentState):
    queries = model.with_structured_output(Queries).invoke([
        SystemMessage(content=RESEARCH_PLAN_PROMPT),
        HumanMessage(content=state['task'])
    ])
    content = state['content'] or []
    for q in queries.queries:
        response = tavily.search(query=q, max_results=2)
        for r in response['results']:
            content.append(r['content'])
    return {"content": content}
def generation_node(state: AgentState):
    content = "\n\n".join(state['content'] or [])
    user_message = HumanMessage(
        content=f"{state['task']}\n\nHere is my plan:\n\n{state['plan']}")
    messages = [
        SystemMessage(
            content=WRITER_PROMPT.format(content=content)
        ),
        user_message
    ]
    response = model.invoke(messages)
    return {
        "draft": response.content,
        "revision_number": state.get("revision_number", 1) + 1
    }

def reflection_node(state: AgentState):
    messages = [
        SystemMessage(content=REFLECTION_PROMPT),
        HumanMessage(content=state['draft'])
    ]
    response = model.invoke(messages)
    return {"critique": response.content}

def research_critique_node(state: AgentState):
    queries = model.with_structured_output(Queries).invoke([
        SystemMessage(content=RESEARCH_CRITIQUE_PROMPT),
        HumanMessage(content=state['critique'])
    ])
    content = state['content'] or []
    for q in queries.queries:
        response = tavily.search(query=q, max_results=2)
        for r in response['results']:
            content.append(r['content'])
    return {"content": content}

def should_continue(state):
    if state["revision_number"] > state["max_revisions"]:
        return END
    return "reflect"
The graph is constructed as shown below.
builder = StateGraph(AgentState)
builder.add_node("planner", plan_node)
builder.add_node("generate", generation_node)
builder.add_node("reflect", reflection_node)
builder.add_node("research_plan", research_plan_node)
builder.add_node("research_critique", research_critique_node)
builder.set_entry_point("planner")
builder.add_conditional_edges(
    "generate",
    should_continue,
    {END: END, "reflect": "reflect"}
)
builder.add_edge("planner", "research_plan")
builder.add_edge("research_plan", "generate")
builder.add_edge("reflect", "research_critique")
builder.add_edge("research_critique", "generate")
graph = builder.compile(checkpointer=memory)
Run it as shown below.
thread = {"configurable": {"thread_id": "1"}}

for s in graph.stream({
    'task': "what is the difference between langchain and langsmith",
    "max_revisions": 2,
    "revision_number": 1,
}, thread):
    print(s)
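Because the graph was compiled with a checkpointer, the final state can also be read back after the stream completes; a minimal sketch using the same thread:

# Read the last checkpointed state for this thread and print the final draft.
snapshot = graph.get_state(thread)
print(snapshot.values["draft"])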
The interface is launched as follows.
import warnings
warnings.filterwarnings("ignore")
MultiAgent = ewriter()
app = writer_gui(MultiAgent.graph)
app.launch()