From bce5e7da8d5e1363af9321d7638fc030a39605a1 Mon Sep 17 00:00:00 2001
From: mmz-001 <70096033+mmz-001@users.noreply.github.com>
Date: Sun, 2 Jul 2023 09:36:20 +0530
Subject: [PATCH] use OpenAI chat model instead of completion model

The latest chat model is faster and cheaper
---
 knowledge_gpt/utils/QA.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/knowledge_gpt/utils/QA.py b/knowledge_gpt/utils/QA.py
index c05101a..1cc8e83 100644
--- a/knowledge_gpt/utils/QA.py
+++ b/knowledge_gpt/utils/QA.py
@@ -6,7 +6,7 @@
 import streamlit as st
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
 from langchain.docstore.document import Document
-from langchain.llms import OpenAI
+from langchain.chat_models import ChatOpenAI
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import VectorStore
 from langchain.vectorstores.faiss import FAISS
@@ -129,9 +129,8 @@ def get_answer(docs: List[Document], query: str) -> Dict[str, Any]:
     """Gets an answer to a question from a list of Documents."""
 
     # Get the answer
-
     chain = load_qa_with_sources_chain(
-        OpenAI(
+        ChatOpenAI(
             temperature=0, openai_api_key=st.session_state.get("OPENAI_API_KEY")
         ),  # type: ignore
         chain_type="stuff",