Skip to content

Commit

Permalink
langchain memory
Browse files Browse the repository at this point in the history
  • Loading branch information
manufy committed Jun 8, 2024
1 parent 4956012 commit 15951d2
Show file tree
Hide file tree
Showing 18 changed files with 642 additions and 0 deletions.
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@

# Minimal demo: one turn through a ConversationChain (default in-process memory).
from langchain_openai import OpenAI
from langchain.chains import ConversationChain


# temperature=0 keeps the completion deterministic for the demo.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)

reply = conversation.predict(input="Hi there!")

print(reply)
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
from langchain_openai import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# Demo: seed a ConversationBufferMemory, attach it to the chain, and inspect
# how the buffer grows across three turns.

llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Build the memory first so the same object can be handed to the chain below.
memory = ConversationBufferMemory(return_messages=True)
memory.save_context({"input": "hi there!"}, {"output": "Hi there! It's nice to meet you. How can I help you today?"})

print( memory.load_memory_variables({}) )

# Fix: the original constructed the chain WITHOUT `memory=...`, so the buffer
# seeded and printed above was never the one the chain actually used, and the
# final load_memory_variables() call could not reflect the conversation.
conversation = ConversationChain(llm=llm, memory=memory, verbose=True)

output = conversation.predict(input="In what scenarios extra memory should be used?")
output = conversation.predict(input="There are various types of memory in Langchain. When to use which type?")
output = conversation.predict(input="Do you remember what was our first message?")


print(output)
print("------------------")
print( memory.load_memory_variables({}) )
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Fix: `from langchain import OpenAI, ConversationChain` relies on root-level
# re-exports that were removed in langchain >= 0.1; the provider class now
# lives in the `langchain_openai` package and the chain in `langchain.chains`.
from langchain_openai import OpenAI
from langchain.chains import ConversationChain

# Deterministic completions for a reproducible demo run.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)
conversation = ConversationChain(llm=llm, verbose=True)

output = conversation.predict(input="In what scenarios extra memory should be used?")
output = conversation.predict(input="There are various types of memory in Langchain. When to use which type?")
output = conversation.predict(input="Do you remember what was our first message?")

print ("---- Output ----")
print(output)
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
from langchain_openai import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# Deterministic LLM for a reproducible demo.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Explicit buffer memory, shared with the chain so it can be inspected between turns.
memory = ConversationBufferMemory()
conversation = ConversationChain(llm=llm, memory=memory, verbose=True)

questions = (
    "In what scenarios extra memory should be used?",
    "There are various types of memory in Langchain. When to use which type?",
    "Do you remember what was our first message?",
)

# After each turn, dump the raw buffer to show how history accumulates.
for turn, question in enumerate(questions, start=1):
    answer = conversation.predict(input=question)
    print(f"--- Output {turn} ---")
    print(answer)
    print(f"--- Memory after Output {turn} ---")
    print(memory.buffer)


Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
from langchain_openai import OpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# Initialize the language model (temperature=0 for deterministic output)
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Initialize the memory the chain will read from and write to
memory = ConversationBufferMemory()

# Initialize the conversation chain with the language model and memory
conversation = ConversationChain(llm=llm, memory=memory, verbose=True)

# Simulate a conversation
input_messages = [
    "In what scenarios extra memory should be used?",
    "There are various types of memory in Langchain. When to use which type?",
    "Do you remember what was our first message?"
]
output_messages = []

# Iterate over input messages
for input_message in input_messages:
    # Fix: the original reassigned `memory.buffer = []` and appended
    # (input, output) tuples to it by hand. `buffer` is a read-only property
    # derived from `memory.chat_memory`, and `predict` already persists each
    # turn via save_context — the manual bookkeeping both failed and would
    # have duplicated state. Just predict; the chain maintains the memory.
    output = conversation.predict(input=input_message)
    print("--- Output ---")
    print(output)
    output_messages.append(output)

# Check memory after the conversation
print("--- Memory ---")
print(memory.buffer)
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
from langchain_openai import OpenAI
from langchain.chains import ConversationChain

from langchain.memory import ConversationBufferMemory

llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Fix: the original gave the chain one ConversationBufferMemory and then
# seeded/printed a second, unrelated one — the seeded history could never
# reach the chain. Build a single memory object and share it.
memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(llm=llm, verbose=True, memory=memory)

# Seed one (human, AI) exchange directly into the shared memory.
memory.save_context({"input": "hi there!"}, {"output": "Hi there! It's nice to meet you. How can I help you today?"})

print( memory.load_memory_variables({}) )
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
from langchain.chains import ConversationChain
from langchain_openai import OpenAI
from langchain.memory import ConversationBufferMemory

# Deterministic completions for the demo.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Chain with an explicit (initially empty) conversation buffer.
conversation = ConversationChain(
    llm=llm,
    verbose=True,
    memory=ConversationBufferMemory()
)

output = conversation.predict(input="In what scenarios extra memory should be used?")
output = conversation.predict(input="There are various types of memory in Langchain. When to use which type?")
output = conversation.predict(input="Do you remember what was our first message?")

# Fix: the final answer was computed but never shown (only verbose prompt
# logging made anything visible); print it so the script has real output.
print(output)
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_openai import OpenAI

# Deterministic LLM for a reproducible demo.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Chat-style prompt: fixed system line, the running history, then the new input.
chat_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(
            "The following is a friendly conversation between a human and an AI."
        ),
        MessagesPlaceholder(variable_name="history"),
        HumanMessagePromptTemplate.from_template("{input}"),
    ]
)

# return_messages=True keeps history as message objects for the placeholder above.
history = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=history, prompt=chat_prompt, llm=llm, verbose=True)

# Three turns; the last one tests recall of the first.
for question in (
    "Tell me a joke about elephants",
    "Who is the author of the Harry Potter series?",
    "What was the joke you told me earlier?",
):
    print(conversation.predict(input=question))
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
import pprint

from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_openai import OpenAI

from langchain.load.dump import dumps

# Deterministic LLM for a reproducible demo.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# System line + accumulated history + the latest human input.
prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template("The following is a friendly conversation between a human and an AI."),
    MessagesPlaceholder(variable_name="history"),
    HumanMessagePromptTemplate.from_template("{input}")
])

memory = ConversationBufferMemory(return_messages=True)
conversation = ConversationChain(memory=memory, prompt=prompt, llm=llm, verbose=True)

pretty = pprint.PrettyPrinter(indent=4)

# Turn 1: pretty-print the full chain payload (input, history, response).
first = conversation("Tell me about the history of the Internet.")
print("------------")
pretty.pprint(first)

# Turn 2: the chain recalls the previous topic from memory.
second = conversation("Who are some important figures in its development?")
pretty.pprint(second)

print("------------")

# Turn 3: raw dict via plain print, then just the answer text.
third = conversation("What did Tim Berners-Lee contribute?")
print(third)
print ("--------- ")

print(third['response'])
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
{
"input": "Tell me about the history of the Internet.",
"history": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"HumanMessage"
],
"kwargs": {
"content": "Tell me about the history of the Internet.",
"type": "human"
}
},
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"messages",
"AIMessage"
],
"kwargs": {
"content": " \n\nAI: The Internet, also known as the World Wide Web, has a rich and complex history. It began as a project in the late 1960s by the United States Department of Defense, with the goal of creating a decentralized network that could withstand a nuclear attack. This project, called ARPANET, was the precursor to the modern Internet.\n\nIn the 1980s, the development of the TCP/IP protocol allowed for multiple networks to be connected, leading to the creation of the Internet as we know it today. The 1990s saw a surge in the popularity of the Internet, with the introduction of the World Wide Web and the first web browser, allowing for easy access to information and communication.\n\nThe early 2000s saw the rise of social media and e-commerce, further expanding the capabilities and reach of the Internet. Today, the Internet is an integral part of our daily lives, connecting people and information from all corners of the globe. Its history is constantly evolving as new technologies and innovations continue to shape its future.",
"type": "ai",
"tool_calls": [],
"invalid_tool_calls": []
}
}
],
"response": " \n\nAI: The Internet, also known as the World Wide Web, has a rich and complex history. It began as a project in the late 1960s by the United States Department of Defense, with the goal of creating a decentralized network that could withstand a nuclear attack. This project, called ARPANET, was the precursor to the modern Internet.\n\nIn the 1980s, the development of the TCP/IP protocol allowed for multiple networks to be connected, leading to the creation of the Internet as we know it today. The 1990s saw a surge in the popularity of the Internet, with the introduction of the World Wide Web and the first web browser, allowing for easy access to information and communication.\n\nThe early 2000s saw the rise of social media and e-commerce, further expanding the capabilities and reach of the Internet. Today, the Internet is an integral part of our daily lives, connecting people and information from all corners of the globe. Its history is constantly evolving as new technologies and innovations continue to shape its future."
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
from langchain_openai import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain

# TODO: set the OPENAI_API_KEY environment variable before running.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Chain with an explicit (initially empty) conversation buffer.
chat = ConversationChain(
    llm=llm,
    verbose=True,
    memory=ConversationBufferMemory(),
)
print(chat.predict(input="Hello!"))
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
from langchain.memory import ConversationBufferWindowMemory
from langchain_openai import OpenAI
from langchain.chains import ConversationChain, LLMChain
from langchain_core.prompts import PromptTemplate

llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

# Persona prompt. ConversationChain requires the template variables to match
# its memory key ("history") and input key ("input"); the original used
# {chat_history}/{visitor_input} and then never passed the prompt to the
# chain at all, so the persona was silently dropped.
template = """You are ArtVenture, a cutting-edge virtual tour guide for
an art gallery that showcases masterpieces from alternate dimensions and
timelines. Your advanced AI capabilities allow you to perceive and understand
the intricacies of each artwork, as well as their origins and significance in
their respective dimensions. As visitors embark on their journey with you
through the gallery, you weave enthralling tales about the alternate histories
and cultures that gave birth to these otherworldly creations.
{history}
Visitor: {input}
Tour Guide:"""

prompt = PromptTemplate(
    input_variables=["history", "input"],
    template=template
)

# Keep only the last k=3 exchanges in the prompt window. String-rendered
# history (the default) is used because the template above is plain text;
# return_messages=True would inject message objects into it.
convo_buffer_win = ConversationChain(
    llm=llm,
    prompt=prompt,
    memory=ConversationBufferWindowMemory(k=3)
)

convo_buffer_win("What is your name?")
convo_buffer_win("What can you do?")
convo_buffer_win("Do you mind give me a tour, I want to see your galery?")
convo_buffer_win("what is your working hours?")
result = convo_buffer_win("See you soon.")
print(result)
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
from langchain.memory import ConversationBufferWindowMemory
from langchain_openai import OpenAI
from langchain.chains import ConversationChain, LLMChain
from langchain_core.prompts import PromptTemplate
from langchain.memory import ConversationSummaryMemory
from langchain.memory import ConversationSummaryBufferMemory


# --- Demo 1: ConversationSummaryMemory -------------------------------------
# The memory summarizes past turns with the same LLM instead of storing them verbatim.
llm = OpenAI(model_name="gpt-3.5-turbo-instruct", temperature=0)

conversation_with_summary = ConversationChain(
    llm=llm,
    memory=ConversationSummaryMemory(llm=llm),
    verbose=True
)

# Example conversation
response = conversation_with_summary.predict(input="Hi, what's up?")
print(response)


# --- Demo 2: ConversationSummaryBufferMemory --------------------------------
# Keeps recent turns verbatim and summarizes older ones once the buffer
# exceeds max_token_limit. (Fix: the original also built a PromptTemplate
# over a "topic" variable here that was never passed to any chain; that
# dead code is removed.)
llm = OpenAI(temperature=0)
conversation_with_summary = ConversationChain(
    llm=llm,
    memory=ConversationSummaryBufferMemory(llm=OpenAI(), max_token_limit=40),
    verbose=True
)
conversation_with_summary.predict(input="Hi, what's up?")
conversation_with_summary.predict(input="Just working on writing some documentation!")
response = conversation_with_summary.predict(input="For LangChain! Have you heard of it?")
print(response)

Loading

0 comments on commit 15951d2

Please sign in to comment.