Pushing agents and tools changes
bhancockio committed Jun 22, 2024
1 parent 34fada1 commit e121865
Showing 6 changed files with 63 additions and 26 deletions.
5_agents_and_tools/1_agent_and_tools_basics.py (17 additions, 15 deletions)
@@ -1,12 +1,11 @@
- # Import necessary libraries
- from dotenv import load_dotenv # To load environment variables from a .env file
- from langchain import hub # To pull prompt templates from the hub
- from langchain.agents import ( # To create and run agents
+ from dotenv import load_dotenv
+ from langchain import hub
+ from langchain.agents import (
AgentExecutor,
create_react_agent,
)
- from langchain_core.tools import Tool # To define tools the agent can use
- from langchain_openai import ChatOpenAI # To use the OpenAI model
+ from langchain_core.tools import Tool
+ from langchain_openai import ChatOpenAI

# Load environment variables from .env file
load_dotenv()
@@ -26,31 +25,34 @@ def get_current_time(*args, **kwargs):
Tool(
name="Time", # Name of the tool
func=get_current_time, # Function that the tool will execute
- description="Useful for when you need to know the current time", # Description of the tool
+ # Description of the tool
+ description="Useful for when you need to know the current time",
),
]

# Pull the prompt template from the hub
# ReAct = Reason and Action
# https://smith.langchain.com/hub/hwchase17/react
prompt = hub.pull("hwchase17/react")

# Initialize a ChatOpenAI model
llm = ChatOpenAI(
model="gpt-4o", temperature=0
- ) # Using GPT-4 model with deterministic output
+ )

# Create the ReAct agent using the create_react_agent function
agent = create_react_agent(
- llm=llm, # Language model the agent will use to generate responses
- tools=tools, # List of tools the agent can use
- prompt=prompt, # Prompt template to guide the agent
- stop_sequence=True, # Add a stop sequence to prevent hallucinations
+ llm=llm,
+ tools=tools,
+ prompt=prompt,
+ stop_sequence=True,
)

# Create an agent executor from the agent and tools
agent_executor = AgentExecutor.from_agent_and_tools(
- agent=agent, # Agent to execute
- tools=tools, # List of tools available to the agent
- verbose=True, # Enable detailed logging for debugging purposes
+ agent=agent,
+ tools=tools,
+ verbose=True,
)

# Run the agent with a test query
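The test invocation itself falls below the truncated context of this hunk. For reference, a minimal sketch of how this executor is typically called; the input string is illustrative and not taken from the diff:

# Illustrative only: invoke the ReAct agent built above with a sample query.
response = agent_executor.invoke({"input": "What time is it?"})
print("response:", response)
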
5_agents_and_tools/agent_deep_dive/1_agent_react_chat.py (5 additions, 3 deletions)
@@ -24,7 +24,8 @@ def search_wikipedia(query):
from wikipedia import summary

try:
- return summary(query, sentences=2) # Limit to two sentences for brevity
+ # Limit to two sentences for brevity
+ return summary(query, sentences=2)
except:
return "I couldn't find any information on that."

@@ -47,11 +48,12 @@ def search_wikipedia(query):
prompt = hub.pull("hwchase17/structured-chat-agent")

# Initialize a ChatOpenAI model
- llm = ChatOpenAI()
+ llm = ChatOpenAI(model="gpt-4o")

# Create a structured Chat Agent with Conversation Buffer Memory
# ConversationBufferMemory stores the conversation history, allowing the agent to maintain context across interactions
- memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
+ memory = ConversationBufferMemory(
+ memory_key="chat_history", return_messages=True)

# create_structured_chat_agent initializes a chat agent designed to interact using a structured prompt and tools
# It combines the language model (llm), tools, and prompt to create an interactive agent
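The executor construction for this agent sits below the truncated context. A minimal sketch of how the memory object is typically wired in, assuming AgentExecutor.from_agent_and_tools is used as in the other files; the exact call is not shown in this diff:

# Sketch only: pass the ConversationBufferMemory so chat_history persists across turns.
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    memory=memory,
    verbose=True,
)
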
[file path not shown] (9 additions, 5 deletions)
@@ -22,7 +22,8 @@
# Check if the Chroma vector store already exists
if os.path.exists(persistent_directory):
print("Loading existing vector store...")
- db = Chroma(persist_directory=persistent_directory, embedding_function=None)
+ db = Chroma(persist_directory=persistent_directory,
+ embedding_function=None)
else:
raise FileNotFoundError(
f"The directory {persistent_directory} does not exist. Please check the path."
@@ -32,7 +33,8 @@
embeddings = OpenAIEmbeddings(model="text-embedding-3-small")

# Load the existing vector store with the embedding function
- db = Chroma(persist_directory=persistent_directory, embedding_function=embeddings)
+ db = Chroma(persist_directory=persistent_directory,
+ embedding_function=embeddings)

# Create a retriever for querying the vector store
# `search_type` specifies the type of search (e.g., similarity)
@@ -98,7 +100,8 @@
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)

# Create a retrieval chain that combines the history-aware retriever and the question answering chain
- rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
+ rag_chain = create_retrieval_chain(
+ history_aware_retriever, question_answer_chain)


# Set Up ReAct Agent with Document Store Retriever
@@ -123,15 +126,16 @@
)

agent_executor = AgentExecutor.from_agent_and_tools(
- agent=agent, tools=tools, handle_parsing_errors=True
+ agent=agent, tools=tools, handle_parsing_errors=True, verbose=True,
)

chat_history = []
while True:
query = input("You: ")
if query.lower() == "exit":
break
- response = agent_executor.invoke({"input": query, "chat_history": chat_history})
+ response = agent_executor.invoke(
+ {"input": query, "chat_history": chat_history})
print(f"AI: {response['output']}")

# Update history
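The lines under "# Update history" are hidden by the truncation; presumably they append the latest turn to chat_history. A hedged sketch of that common pattern (the actual hidden lines may differ):

# Assumed pattern: record the turn in chat_history for the next iteration.
from langchain_core.messages import AIMessage, HumanMessage

chat_history.append(HumanMessage(content=query))
chat_history.append(AIMessage(content=response["output"]))
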
5_agents_and_tools/tools_deep_dive/3_tool_base_tool.py (15 additions, 2 deletions)
@@ -1,16 +1,22 @@
# Docs: https://python.langchain.com/v0.1/docs/modules/tools/custom_tools/

# Import necessary libraries
+ import os
from typing import Type

+ from dotenv import load_dotenv
from langchain import hub
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.pydantic_v1 import BaseModel, Field
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI


+ load_dotenv()

# Pydantic models for tool arguments


class SimpleSearchInput(BaseModel):
query: str = Field(description="should be a search query")

@@ -21,6 +27,8 @@ class MultiplyNumbersArgs(BaseModel):


# Custom tool with only custom input


class SimpleSearchTool(BaseTool):
name = "simple_search"
description = "useful for when you need to answer questions about current events"
@@ -31,7 +39,12 @@ def _run(
query: str,
) -> str:
"""Use the tool."""
- return f"Search results for: {query}"
+ from tavily import TavilyClient
+
+ api_key = os.getenv("TAVILY_API_KEY")
+ client = TavilyClient(api_key=api_key)
+ results = client.search(query=query)
+ return f"Search results for: {query}\n\n\n{results}\n"


# Custom tool with custom input and output
@@ -78,7 +91,7 @@ def _run(
)

# Test the agent with sample queries
- response = agent_executor.invoke({"input": "Search for 'LangChain updates'"})
+ response = agent_executor.invoke({"input": "Search for Apple Intelligence"})
print("Response for 'Search for LangChain updates':", response)

response = agent_executor.invoke({"input": "Multiply 10 and 20"})
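The new _run body assumes a TAVILY_API_KEY entry in the project's .env file. A standalone sketch for checking the Tavily setup outside the agent; the query string is illustrative and not part of the commit:

# Illustrative check: confirm the Tavily client is configured before running the agent.
import os

from dotenv import load_dotenv
from tavily import TavilyClient

load_dotenv()  # expects TAVILY_API_KEY in .env
client = TavilyClient(api_key=os.getenv("TAVILY_API_KEY"))
print(client.search(query="LangChain"))
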
poetry.lock (16 additions, 1 deletion)

Some generated files are not rendered by default.

pyproject.toml (1 addition, 0 deletions)
@@ -21,6 +21,7 @@ bs4 = "^0.0.2"
firecrawl-py = "^0.0.13"
langchainhub = "^0.1.18"
wikipedia = "^1.4.0"
+ tavily-python = "^0.3.3"

[tool.pyright]
# https://github.com/microsoft/pyright/blob/main/docs/configuration.md
