Skip to content

Commit

Permalink
Test cases updated and library dependencies added
Browse files Browse the repository at this point in the history
  • Loading branch information
sajedjalil committed Oct 2, 2024
1 parent a0a2133 commit 3cf0a02
Show file tree
Hide file tree
Showing 5 changed files with 30 additions and 23 deletions.
3 changes: 1 addition & 2 deletions home/constants/chat_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,5 +14,4 @@
# model_gpt_3_5_turbo_0125 = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
# model_chatgpt_4o = ChatOpenAI(model="chatgpt-4o-latest", temperature=0)


predefined_medical_model = model_claude_3_haiku
# You can also define other models from other providers like Ollama, Cohere. Just make sure they use langchain library
6 changes: 2 additions & 4 deletions home/constants/constants.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from langchain_core.prompts import PromptTemplate

summary_prompt = "Create a summary of the conversation above. Try to summarize within 200 words: "
summary_prompt = ("Create a summary of the conversation above. Try to summarize within 150 words. Do not mention "
"anything in response like total word count, or anything other than the summary")
summarize_trigger_count = 4

llm_prompt_text = '''You are a helpful AI medical assistant namely Patient Chat and are developed by a software
Expand Down Expand Up @@ -58,4 +57,3 @@
was a co-winner of her first Nobel Prize, making them the first-ever married couple to win the Nobel Prize and
launching the Curie family legacy of five Nobel Prizes. She was, in 1906, the first woman to become a professor at
the University of Paris. She also won a US presidential award."""

27 changes: 19 additions & 8 deletions home/tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,24 +14,35 @@ def setUp(self):

def test_llm_name(self):
user_message = "Hi, my name is Sajed. What's your name?"
ai_response, summary = self.llm_graph.chat_inference(user_message, [], "test_" + str(uuid.uuid4()))
ai_response, summary, tools_called = self.llm_graph.chat_inference(user_message, [], "test_" + str(uuid.uuid4()))
self.assertTrue(ai_response.__contains__("Patient Chat"))
self.assertEqual(summary, None)
self.assertEqual(tools_called, list())

def test_llm_remembers_context_history(self):
history = history_one_turn
user_message = "Now tell me what's my name?"
ai_response, summary = self.llm_graph.chat_inference(user_message, history, "test_" + str(uuid.uuid4()))
ai_response, summary, tools_called = self.llm_graph.chat_inference(user_message, history, "test_" + str(uuid.uuid4()))
self.assertTrue(ai_response.__contains__("Sajed"))
self.assertNotEqual(summary, None)
self.assertEqual(tools_called, list())

def test_llm_tool_call(self):
history = []
user_message = "Change my medicine lorazepam."
ai_response, summary = self.llm_graph.chat_inference(user_message, history, "test_" + str(uuid.uuid4()))
self.assertGreater(len(ai_response), 50)
user_message = "Change my medication lorazepam."
ai_response, summary, tools_called = self.llm_graph.chat_inference(user_message, history, "test_" + str(uuid.uuid4()))
self.assertEqual(len(tools_called), 1)
self.assertNotEqual(summary, None)
        # Tool calls internally increase token counts. We consider tool calling
        # a separate chat. That's why it triggers summarization early. It's simply a design choice. Change the logic of
        # LLMGraph.if_need_summarization() if you want a different design decision.

def test_llm_tool_call_with_summary(self):
history = history_two_turns
user_message = "Change my medicine lorazepam."
ai_response, summary = self.llm_graph.chat_inference(user_message, history, "test_" + str(uuid.uuid4()))
self.assertGreater(len(summary), 0)
user_message = "Change my medication lorazepam and schedule my appointment on Sunday."
ai_response, summary, tools_called = self.llm_graph.chat_inference(user_message, history, "test_" + str(uuid.uuid4()))
        # Should make two tool calls: one for changing the medication and another for changing the appointment schedule.
self.assertEqual(len(tools_called), 2)
self.assertNotEqual(summary, None)


1 change: 0 additions & 1 deletion home/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,6 @@ def insight(request):
user_message = data['message']

response = RAGGraph().rag_store_and_query(user_message)
print("response: ", response)
return JsonResponse({
'insight': response
})
Expand Down
16 changes: 8 additions & 8 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
pip
asgiref
Django~=5.1.1
pip
sqlparse
python-dotenv
notebook
psycopg
neo4j
psycopg==3.2.3
neo4j==5.25.0
json_repair

langgraph
langgraph-sdk
langgraph-checkpoint-sqlite
langsmith
langgraph==0.2.33
langgraph-sdk==0.1.32
langgraph-checkpoint-sqlite==2.0.0
langsmith==0.1.130

langchain
langchain-core
langchain-core==0.3.8
langchain-community
langchain_experimental

Expand Down

0 comments on commit 3cf0a02

Please sign in to comment.