Added Anthropic Architecture
sajedjalil committed Sep 26, 2024
1 parent b3a7b53 commit d07002e
Showing 9 changed files with 529 additions and 13 deletions.
8 changes: 8 additions & 0 deletions .idea/.gitignore


18 changes: 6 additions & 12 deletions Patient_Chat/settings.py
@@ -1,19 +1,12 @@
"""
Django settings for Patient_Chat project.
Generated by 'django-admin startproject' using Django 5.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/5.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/5.1/ref/settings/
"""

from pathlib import Path
import os
from dotenv import load_dotenv

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
load_dotenv(os.path.join(BASE_DIR, '.env'))

ANTHROPIC_API_KEY = os.environ.get('ANTHROPIC_API_KEY')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/5.1/howto/deployment/checklist/
@@ -117,3 +110,4 @@
# https://docs.djangoproject.com/en/5.1/ref/settings/#default-auto-field

DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
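
For local development the Anthropic key is read from a .env file next to manage.py via load_dotenv. A minimal sketch of that file (the key value is a placeholder, not a real credential):

# .env — keep untracked; never commit real keys
ANTHROPIC_API_KEY=sk-ant-your-key-here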

Empty file added home/llm/__init__.py
Empty file.
45 changes: 45 additions & 0 deletions home/llm/llm.py
@@ -0,0 +1,45 @@
from langchain_anthropic import ChatAnthropic


class LLM:
    def __init__(self, model_name: str = "claude-3-haiku-20240307"):
        self.model_name = model_name
        llm = ChatAnthropic(model=model_name)

        # bind_tools converts these plain functions into tool schemas
        # from their signatures and docstrings
        self.tools = [request_medication_change, make_appointment, request_appointment_change]
        self.llm_with_tools = llm.bind_tools(self.tools)


def request_medication_change(previous_medication: str) -> str:
    """Submit a request to the doctor to change the patient's medication.

    Args:
        previous_medication: name of the currently prescribed medication

    Returns:
        A confirmation string naming the medication the change was requested for.
    """
    return "Change request submitted for " + previous_medication


def make_appointment(date: str, reason: str) -> str:
    """Submit an appointment request for the given date and reason.

    Args:
        date: requested appointment date
        reason: reason for the appointment

    Returns:
        A confirmation string echoing the requested date and reason.
    """
    return "Appointment requested on " + date + " for " + reason


def request_appointment_change(past_date: str, requested_date: str) -> str:
    """Submit a request to move an existing appointment to a new date.

    Args:
        past_date: date of the currently scheduled appointment
        requested_date: new date the patient is asking for

    Returns:
        A confirmation string with the previous and requested appointment dates.
    """
    return "Appointment change requested from " + past_date + " to " + requested_date
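
A quick sketch of how the bound model surfaces tool calls (assumes ANTHROPIC_API_KEY is set in the environment; the prompt is illustrative):

llm = LLM()
# ChatAnthropic accepts a bare string and wraps it as a human message
response = llm.llm_with_tools.invoke("Please move my appointment from Oct 1 to Oct 8")
# When the model opts to call a tool, the returned AIMessage carries structured
# tool calls rather than a plain-text answer
print(response.tool_calls)  # e.g. [{'name': 'request_appointment_change', 'args': {...}}]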
56 changes: 56 additions & 0 deletions home/llm/llm_graph.py
@@ -0,0 +1,56 @@
from langgraph.graph import START, StateGraph
from langgraph.graph.state import CompiledStateGraph
from langgraph.prebuilt import tools_condition, ToolNode

from langgraph.graph import MessagesState
from langchain_core.messages import HumanMessage, SystemMessage

from home.llm.llm import LLM


class LLMGraph:
    prompt_text = '''
    You are a helpful AI medical assistant. You should only respond to health-related topics such as:
    - General human health and lifestyle inquiries.
    - Questions about the patient’s medical condition, medication regimen, diet, etc.
    - Requests from the patient to their doctor, such as making appointments, modifying appointments, and changing medication.
    You should filter out and ignore any unrelated, sensitive, or controversial topics.
    '''

    def __init__(self):
        self.llm = LLM()
        self.graph = self.build_graph()

    def assistant(self, state: MessagesState):
        # Prepend the system prompt so every turn stays constrained to medical topics
        sys_msg = SystemMessage(content=self.prompt_text)
        return {"messages": [self.llm.llm_with_tools.invoke([sys_msg] + state["messages"])]}

    def build_graph(self) -> CompiledStateGraph:
        builder = StateGraph(MessagesState)

        # Define nodes: these do the work
        builder.add_node("assistant", self.assistant)
        builder.add_node("tools", ToolNode(self.llm.tools))

        # Define edges: these determine how control flow moves
        builder.add_edge(START, "assistant")
        builder.add_conditional_edges(
            "assistant",
            # If the latest assistant message contains a tool call, tools_condition
            # routes to "tools"; otherwise it routes to END
            tools_condition,
        )
        builder.add_edge("tools", "assistant")

        return builder.compile()

    def inference(self, user_message) -> str:
        messages = [HumanMessage(content=user_message)]
        result = self.graph.invoke({"messages": messages})
        for m in result['messages']:
            m.pretty_print()

        print()
        return result['messages'][-1].content
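
A usage sketch for the compiled graph (hypothetical prompt; requires a valid ANTHROPIC_API_KEY):

graph = LLMGraph()
# The assistant node calls the model; if it emits a tool call, ToolNode runs the
# matching function and the loop returns to the assistant for the final answer
reply = graph.inference("Can you book me an appointment on Oct 3 for a flu shot?")
print(reply)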
