
Commit

update code during demo
bhancockio committed Jun 17, 2024
1 parent e070f8f commit 7a268dd
Showing 4 changed files with 79 additions and 67 deletions.
69 changes: 36 additions & 33 deletions 2_prompt_templates/1_prompt_template_basic.py
@@ -1,44 +1,47 @@
# Prompt Template Docs:
# https://python.langchain.com/v0.2/docs/concepts/#prompt-templates

from langchain.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage

# PART 1: Create a ChatPromptTemplate using a template string
template = "Tell me a joke about {topic}."
prompt_template = ChatPromptTemplate.from_template(template)
# # PART 1: Create a ChatPromptTemplate using a template string
# template = "Tell me a joke about {topic}."
# prompt_template = ChatPromptTemplate.from_template(template)

print("-----Prompt from Template-----")
prompt = prompt_template.invoke({"topic": "cats"})
print(prompt)
# print("-----Prompt from Template-----")
# prompt = prompt_template.invoke({"topic": "cats"})
# print(prompt)

# PART 2: Prompt with Multiple Placeholders
template_multiple = """You are a helpful assistant.
Human: Tell me a {adjective} story about a {animal}.
Assistant:"""
prompt_multiple = ChatPromptTemplate.from_template(template_multiple)
prompt = prompt_multiple.invoke({"adjective": "funny", "animal": "panda"})
print("\n----- Prompt with Multiple Placeholders -----\n")
print(prompt)
# # PART 2: Prompt with Multiple Placeholders
# template_multiple = """You are a helpful assistant.
# Human: Tell me a {adjective} story about a {animal}.
# Assistant:"""
# prompt_multiple = ChatPromptTemplate.from_template(template_multiple)
# prompt = prompt_multiple.invoke({"adjective": "funny", "animal": "panda"})
# print("\n----- Prompt with Multiple Placeholders -----\n")
# print(prompt)


# PART 3: Prompt with System and Human Messages (Using Tuples)
messages = [
    ("system", "You are a comedian who tells jokes about {topic}."),
    ("human", "Tell me {joke_count} jokes."),
]
prompt_template = ChatPromptTemplate.from_messages(messages)
prompt = prompt_template.invoke({"topic": "lawyers", "joke_count": 3})
print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
print(prompt)

# Extra Information about Part 3.
# This does work:
messages = [
    ("system", "You are a comedian who tells jokes about {topic}."),
    HumanMessage(content="Tell me 3 jokes."),
]
prompt_template = ChatPromptTemplate.from_messages(messages)
prompt = prompt_template.invoke({"topic": "lawyers"})
print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
print(prompt)
# messages = [
#     ("system", "You are a comedian who tells jokes about {topic}."),
#     ("human", "Tell me {joke_count} jokes."),
# ]
# prompt_template = ChatPromptTemplate.from_messages(messages)
# prompt = prompt_template.invoke({"topic": "lawyers", "joke_count": 3})
# print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
# print(prompt)

# # Extra Information about Part 3.
# # This does work:
# messages = [
#     ("system", "You are a comedian who tells jokes about {topic}."),
#     HumanMessage(content="Tell me 3 jokes."),
# ]
# prompt_template = ChatPromptTemplate.from_messages(messages)
# prompt = prompt_template.invoke({"topic": "lawyers"})
# print("\n----- Prompt with System and Human Messages (Tuple) -----\n")
# print(prompt)


# This does NOT work:
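Note on the "does work / does NOT work" distinction above: ChatPromptTemplate.from_messages compiles tuple entries into message templates, but passes concrete message objects through verbatim, so placeholders inside a HumanMessage are never substituted. A minimal sketch of that behavior (assuming langchain v0.2, as used in this repo):

from langchain.prompts import ChatPromptTemplate
from langchain_core.messages import HumanMessage

# Tuples become templates, so their placeholders get formatted.
# A concrete HumanMessage is passed through as-is, so "{joke_count}"
# below survives as literal text: templating inside a message object
# is what does NOT work.
prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a comedian who tells jokes about {topic}."),
    HumanMessage(content="Tell me {joke_count} jokes."),
])
prompt = prompt_template.invoke({"topic": "lawyers"})
print(prompt)  # the human message still reads "Tell me {joke_count} jokes."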
1 change: 1 addition & 0 deletions 3_chains/1_chains_basics.py
@@ -19,6 +19,7 @@

# Create the combined chain using LangChain Expression Language (LCEL)
chain = prompt_template | model | StrOutputParser()
# chain = prompt_template | model

# Run the chain
result = chain.invoke({"topic": "lawyers", "joke_count": 3})
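The commented-out variant marks what StrOutputParser() changes: without it the chain returns the model's AIMessage, with it a plain string. A minimal sketch of the two result types (model name is illustrative, not from this commit):

from dotenv import load_dotenv
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain_openai import ChatOpenAI

load_dotenv()  # assumes OPENAI_API_KEY is set in .env, as elsewhere in this repo

prompt_template = ChatPromptTemplate.from_messages([
    ("system", "You are a comedian who tells jokes about {topic}."),
    ("human", "Tell me {joke_count} jokes."),
])
model = ChatOpenAI(model="gpt-4o")  # illustrative model choice

message_result = (prompt_template | model).invoke({"topic": "lawyers", "joke_count": 3})
string_result = (prompt_template | model | StrOutputParser()).invoke({"topic": "lawyers", "joke_count": 3})
print(type(message_result).__name__)  # AIMessage
print(type(string_result).__name__)   # str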
17 changes: 9 additions & 8 deletions 3_chains/4_chains_parallel.py
@@ -1,7 +1,7 @@
from dotenv import load_dotenv
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableBranch, RunnableLambda
from langchain.schema.runnable import RunnableParallel, RunnableLambda
from langchain_openai import ChatOpenAI

# Load environment variables from .env
@@ -53,20 +53,21 @@ def combine_pros_cons(pros, cons):


# Simplify branches with LCEL
pros_branch = (
    RunnableLambda(lambda x: analyze_pros(x["features"])) | model | StrOutputParser()
pros_branch_chain = (
    RunnableLambda(lambda x: analyze_pros(x)) | model | StrOutputParser()
)
cons_branch = (
    RunnableLambda(lambda x: analyze_cons(x["features"])) | model | StrOutputParser()

cons_branch_chain = (
    RunnableLambda(lambda x: analyze_cons(x)) | model | StrOutputParser()
)

# Create the combined chain using LangChain Expression Language (LCEL)
chain = (
    prompt_template
    | model
    | RunnableLambda(lambda x: {"features": x})
    | RunnableBranch(branches={"pros": pros_branch, "cons": cons_branch})
    | RunnableLambda(lambda x: combine_pros_cons(x["pros"], x["cons"]))
    | StrOutputParser()
    | RunnableParallel(branches={"pros": pros_branch_chain, "cons": cons_branch_chain})
    | RunnableLambda(lambda x: combine_pros_cons(x["branches"]["pros"], x["branches"]["cons"]))
)

# Run the chain
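Two changes land here: the branch lambdas now receive the model output directly instead of unpacking x["features"], and RunnableBranch (which picks one branch) gives way to RunnableParallel (which runs both). Because the parallel step is keyed as branches={...}, its output is nested one level deep, {"branches": {"pros": ..., "cons": ...}}, which is why the combiner indexes x["branches"]["pros"]. A minimal sketch of that output shape, with stub lambdas standing in for the model calls:

from langchain.schema.runnable import RunnableLambda, RunnableParallel

# Stubs standing in for pros_branch_chain / cons_branch_chain.
pros_branch_chain = RunnableLambda(lambda x: f"pros of {x}")
cons_branch_chain = RunnableLambda(lambda x: f"cons of {x}")

chain = RunnableParallel(
    branches={"pros": pros_branch_chain, "cons": cons_branch_chain}
) | RunnableLambda(lambda x: x["branches"]["pros"] + "\n" + x["branches"]["cons"])

print(chain.invoke("MacBook Pro"))
# pros of MacBook Pro
# cons of MacBook Pro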
59 changes: 33 additions & 26 deletions 3_chains/5_chains_branching.py
@@ -1,7 +1,7 @@
from dotenv import load_dotenv
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableMap, RunnableBranch
from langchain.schema.runnable import RunnableBranch
from langchain_openai import ChatOpenAI

# Load environment variables from .env
@@ -14,14 +14,16 @@
positive_feedback_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "Generate a thank you note for this positive feedback: {feedback}."),
        ("human",
         "Generate a thank you note for this positive feedback: {feedback}."),
    ]
)

negative_feedback_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "Generate a response addressing this negative feedback: {feedback}."),
        ("human",
         "Generate a response addressing this negative feedback: {feedback}."),
    ]
)

@@ -45,41 +47,46 @@
    ]
)


# Define the branch conditions based on feedback sentiment
def is_positive(feedback):
    return "good" in feedback.lower() or "excellent" in feedback.lower()


def is_negative(feedback):
    return "bad" in feedback.lower() or "poor" in feedback.lower()


def is_neutral(feedback):
    return "okay" in feedback.lower() or "neutral" in feedback.lower()

# Define the feedback classification template
classification_template = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human",
         "Classify the sentiment of this feedback as positive, negative, neutral, or escalate: {feedback}."),
    ]
)

# Define the runnable branches for handling feedback
branches = RunnableBranch(
    (lambda x: is_positive(x), positive_feedback_template | model | StrOutputParser()),
    (lambda x: is_negative(x), negative_feedback_template | model | StrOutputParser()),
    (lambda x: is_neutral(x), neutral_feedback_template | model | StrOutputParser()),
    escalate_feedback_template | model | StrOutputParser(),
    (
        lambda x: "positive" in x,
        positive_feedback_template | model | StrOutputParser()  # Positive feedback chain
    ),
    (
        lambda x: "negative" in x,
        negative_feedback_template | model | StrOutputParser()  # Negative feedback chain
    ),
    (
        lambda x: "neutral" in x,
        neutral_feedback_template | model | StrOutputParser()  # Neutral feedback chain
    ),
    escalate_feedback_template | model | StrOutputParser()
)

# Create the combined chain using LangChain Expression Language (LCEL)
chain = branches
# Create the classification chain
classification_chain = classification_template | model | StrOutputParser()

# Combine classification and response generation into one chain
chain = classification_chain | branches

# Run the chain with an example review
# Good review - "The product is excellent. I really enjoyed using it and found it very helpful."
# Bad review - "The product is terrible. It broke after just one use and the quality is very poor."
# Neutral review - "The product is okay. It works as expected but nothing exceptional."
# Default - "I'm not sure about the product yet. Can you tell me more about its features and benefits?"

review = (
    "The product is terrible. It broke after just one use and the quality is very poor."
)
result = chain.invoke(review)
review = "The product is terrible. It broke after just one use and the quality is very poor."
result = chain.invoke({"feedback": review})

# Output the result
print(result)
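The rewrite swaps keyword heuristics (is_positive, is_negative, is_neutral) for an LLM classification step whose string output drives RunnableBranch: each (condition, runnable) pair is tried in order against that string, and the bare trailing runnable is the default (escalation). A minimal sketch of the control flow, with a stub in place of the classification chain:

from langchain.schema.runnable import RunnableBranch, RunnableLambda

# Stub standing in for classification_template | model | StrOutputParser().
classification_chain = RunnableLambda(lambda x: "negative")

branches = RunnableBranch(
    (lambda x: "positive" in x, RunnableLambda(lambda x: "thank-you note")),
    (lambda x: "negative" in x, RunnableLambda(lambda x: "response to the complaint")),
    (lambda x: "neutral" in x, RunnableLambda(lambda x: "request for more details")),
    RunnableLambda(lambda x: "escalate to a human agent"),  # default branch
)

chain = classification_chain | branches
print(chain.invoke({"feedback": "It broke after one use."}))  # response to the complaint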
