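# rag_chatbot.py
#
# A minimal Streamlit RAG chatbot: index a local PDF with Chroma, then answer
# chat questions with an OpenAI model grounded in the retrieved chunks.
#
# Run with: streamlit run rag_chatbot.py
# Assumes OPENAI_API_KEY is set in the environment (ChatOpenAI and
# OpenAIEmbeddings read it by default) and that ./docs.pdf sits next to
# this script.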
import streamlit as st
from langchain import hub
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader

@st.cache_resource
def get_retriever_from_pdf(file_path):
    """Load a PDF, split it into overlapping chunks, and index them in Chroma."""
    loader = PyPDFLoader(file_path)
    docs = loader.load()
    # 600-character chunks with a 200-character overlap keep neighboring
    # context available to the retriever.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=200)
    splits = text_splitter.split_documents(docs)
    vectorstore = Chroma.from_documents(documents=splits, embedding=OpenAIEmbeddings())
    retriever = vectorstore.as_retriever()
    return retriever
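
# A possible variant (a sketch, not part of the original script): passing
# persist_directory to Chroma.from_documents writes the index to disk so
# the PDF is not re-embedded after every full restart. The path below is
# hypothetical.
#
#   vectorstore = Chroma.from_documents(
#       documents=splits,
#       embedding=OpenAIEmbeddings(),
#       persist_directory="./chroma_db",
#   )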

@st.cache_resource
def build_chain(_llm, _retriever, _prompt):
    """Compose the RAG chain: retrieve context, fill the prompt, call the LLM."""
    # The leading underscores tell st.cache_resource not to try to hash
    # these unhashable arguments.
    chain = (
        {
            "context": _retriever,
            "question": RunnablePassthrough(),
        }
        | _prompt
        | _llm
        | StrOutputParser()
    )
    return chain
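
# Note: as wired above, the retriever's list of Document objects reaches the
# prompt stringified as-is. A common variant (a sketch, not this script's
# behavior) joins just the page contents before prompting:
#
#   def format_docs(docs):
#       return "\n\n".join(doc.page_content for doc in docs)
#
#   {"context": _retriever | format_docs, "question": RunnablePassthrough()}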

def serve(chain):
    """Render the chat history and answer new questions with the RAG chain."""
    # Seed the conversation once; st.session_state survives Streamlit reruns.
    if "logs" not in st.session_state:
        st.session_state.logs = [
            {"role": "assistant", "content": "How may I help you?"}
        ]

    # Replay the conversation so far.
    for log in st.session_state.logs:
        with st.chat_message(log["role"]):
            st.write(log["content"])

    # Answer a newly submitted question.
    if question := st.chat_input():
        st.chat_message("user").write(question)
        req_log = {"role": "user", "content": question}
        st.session_state.logs.append(req_log)
        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                answer = chain.invoke(question)
                st.write(answer)
        res_log = {"role": "assistant", "content": answer}
        st.session_state.logs.append(res_log)

if __name__ == "__main__":
    llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
    retriever = get_retriever_from_pdf("./docs.pdf")
    prompt = hub.pull("rlm/rag-prompt")  # community RAG prompt from the LangChain Hub
    chain = build_chain(llm, retriever, prompt)
    serve(chain)
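
# Quick smoke test without the UI (a sketch under the same assumptions:
# OPENAI_API_KEY set and ./docs.pdf present); the cached functions still
# execute, with a warning, outside `streamlit run`:
#
#   chain = build_chain(
#       ChatOpenAI(model="gpt-3.5-turbo-0125"),
#       get_retriever_from_pdf("./docs.pdf"),
#       hub.pull("rlm/rag-prompt"),
#   )
#   print(chain.invoke("What is this document about?"))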