Commit

chat agent with basic functionality
jjoller committed Feb 7, 2025
1 parent 01bd5e2 commit af107cb
Showing 17 changed files with 529 additions and 42 deletions.
5 changes: 3 additions & 2 deletions .gitignore
@@ -2,5 +2,6 @@
.nvm
node_modules
vocabtrainer.iml

__pycache__
__pycache__
venv
server/src/task_index/
93 changes: 80 additions & 13 deletions server/requirements.txt
@@ -1,20 +1,87 @@
annotated-types==0.6.0
aiohappyeyeballs==2.4.4
aiohttp==3.11.11
aiosignal==1.3.2
annotated-types==0.7.0
anthropic==0.45.2
anyio==3.7.1
certifi==2023.11.17
asttokens==3.0.0
attrs==25.1.0
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.7
dataclasses-json==0.6.7
decorator==5.1.1
defusedxml==0.7.1
distro==1.9.0
exceptiongroup==1.2.0
executing==2.2.0
faiss-cpu==1.10.0
fastapi==0.105.0
frozenlist==1.5.0
greenlet==3.1.1
h11==0.14.0
httpcore==1.0.2
httpx==0.25.2
idna==3.6
numpy==1.26.2
pydantic==2.5.2
pydantic-settings==2.1.0
pydantic_core==2.14.5
python-dotenv==1.0.0
sniffio==1.3.0
httpcore==1.0.7
httpx==0.28.1
httpx-sse==0.4.0
idna==3.10
ipython==8.32.0
jedi==0.19.2
jellyfish==1.1.3
jiter==0.8.2
jsonpatch==1.33
jsonpointer==3.0.0
langchain==0.3.17
langchain-anthropic==0.3.6
langchain-community==0.3.16
langchain-core==0.3.33
langchain-openai==0.3.3
langchain-text-splitters==0.3.5
langgraph==0.2.69
langgraph-checkpoint==2.0.10
langgraph-sdk==0.1.51
langsmith==0.3.5
marshmallow==3.26.1
matplotlib-inline==0.1.7
msgpack==1.1.0
multidict==6.1.0
mypy-extensions==1.0.0
networkx==3.4.2
numpy==1.26.4
openai==1.61.1
orjson==3.10.15
packaging==24.2
parso==0.8.4
pexpect==4.9.0
prompt_toolkit==3.0.50
propcache==0.2.1
ptyprocess==0.7.0
pure_eval==0.2.3
pydantic==2.10.6
pydantic-settings==2.7.1
pydantic_core==2.27.2
Pygments==2.19.1
python-dotenv==1.0.1
PyYAML==6.0.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
segtok==1.5.11
sniffio==1.3.1
SQLAlchemy==2.0.37
stack-data==0.6.3
starlette==0.27.0
typing_extensions==4.9.0
tabulate==0.9.0
tavily-python==0.5.0
tenacity==9.0.0
tiktoken==0.8.0
tqdm==4.67.1
traitlets==5.14.3
typing-inspect==0.9.0
typing_extensions==4.12.2
urllib3==2.3.0
uvicorn==0.24.0.post1
requests==2.32.3
wcwidth==0.2.13
yake==0.4.8
yarl==1.18.3
zstandard==0.23.0
cachetools
File renamed without changes.
Empty file added server/src/aitutor/__init__.py
Empty file.
119 changes: 119 additions & 0 deletions server/src/aitutor/agent.py
@@ -0,0 +1,119 @@
from typing import Annotated

from langchain_anthropic import ChatAnthropic
from langchain_core.messages import SystemMessage, HumanMessage, BaseMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from pydantic import BaseModel
from typing_extensions import TypedDict
from aitutor.tools.feedback_tool import get_feedback_tool
from aitutor.tools.recommend_exercise_tool import recommend_exercise_tool, get_list_of_topics
from models import Message


# Define input model
class ChatRequest(BaseModel):
    message: str


class State(TypedDict):
    messages: Annotated[list, add_messages]


tools = [get_feedback_tool, recommend_exercise_tool, get_list_of_topics]

llm = ChatAnthropic(model="claude-3-5-sonnet-20240620")
llm_with_tools = llm.bind_tools(tools)


def chatbot(state: State):
    return {"messages": [llm_with_tools.invoke(state["messages"])]}


memory = MemorySaver()

# Default state

# class TutorState(BaseModel):
#     messages: list
#
# initial_state = TutorState(messages=[SystemMessage(content=system_prompt)])

graph_builder = StateGraph(State)

# The first argument is the unique node name
# The second argument is the function or object that will be called whenever the node is used.
graph_builder.add_node("chatbot", chatbot)
tool_node = ToolNode(tools=tools)
graph_builder.add_node("tools", tool_node)

graph_builder.add_conditional_edges(
    "chatbot",
    tools_condition,
)

# Any time a tool is called, we return to the chatbot to decide the next step
graph_builder.add_edge("tools", "chatbot")
graph_builder.add_edge(START, "chatbot")
graph = graph_builder.compile(checkpointer=memory)
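
# A minimal smoke test of the compiled graph might look like this (hypothetical,
# not part of this commit; the thread id is arbitrary):
#
#   config = {"configurable": {"thread_id": "demo"}}
#   result = graph.invoke({"messages": [{"role": "user", "content": "Hi"}]}, config)
#   print(result["messages"][-1].content)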

system_prompt = (
    "You are an interactive AI tutor. You ask the student what they want to learn and challenge them with exercises. "
    "You provide feedback on the student's answers. "
    "If the student answers incorrectly, offer helpful hints instead of direct answers. "
    "After any clarification, always return to the exercise to keep the learning experience interactive and engaging. "
    "Only use the tool for generating exercises. "
    "Always respond in the same language as the exercises."
)

def ai_tutor_chat_call_inner(message: BaseMessage, user_config):
    """Handle a single message and return the AI response."""
    print(f"message type: {message.type}")
    role = "user" if message.type == "human" else "ai"
    events = graph.stream(
        {"messages": [{"role": role, "content": message.content}]},
        user_config,
        stream_mode="values",
    )
    response_messages = []
    for event in events:
        response_messages.append(event["messages"][-1].content)
    return {"response": response_messages[-1] if response_messages else "No response generated."}


def ai_tutor_chat_call(message: Message, user_id):
    user_config = {"configurable": {"thread_id": user_id}}
    snapshot = graph.get_state(user_config)
    print(f"snapshot: {snapshot}")
    system_message = SystemMessage(content=system_prompt)
    if snapshot.values.get("messages") is None:
        # First message on this thread: seed the conversation with the system prompt.
        ai_tutor_chat_call_inner(system_message, user_config)
    return ai_tutor_chat_call_inner(HumanMessage(message.content), user_config)

# def input_chat(user_input: str, role: str = "user"):
#     events = graph.stream(
#         {"messages": [{"role": role, "content": user_input}]},
#         config,
#         stream_mode="values",
#     )
#     for event in events:
#         event["messages"][-1].pretty_print()
#
#
# input_chat("Hallo")
#
# while True:
#     user_input = input("User: ")
#     if user_input.lower() in ["quit", "exit", "q"]:
#         print("Goodbye!")
#         break
#     input_chat(user_input)
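
The ChatRequest model above is defined but not used in this file. A minimal sketch of how ai_tutor_chat_call might be exposed over HTTP, assuming a FastAPI app elsewhere in server/src (the route path, header name, and Message construction are assumptions, not part of this commit):

from fastapi import FastAPI, Header
from aitutor.agent import ChatRequest, ai_tutor_chat_call
from models import Message

app = FastAPI()

@app.post("/chat")
def chat(request: ChatRequest, user_id: str = Header(alias="X-User-Id")) -> dict:
    # ai_tutor_chat_call reads message.content, so Message is assumed to
    # carry the raw text of the user's message.
    return ai_tutor_chat_call(Message(content=request.message), user_id)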
22 changes: 22 additions & 0 deletions server/src/aitutor/generate_stores.py
@@ -0,0 +1,22 @@
import json
import langchain_openai
import langchain_community.vectorstores

from aitutor.lap_service import fetch_tasks

# Fetch the tasks from the LAP server
tasks = fetch_tasks(limit=1000)
print(f"Fetched {len(tasks)} tasks.")
task_documents = [json.dumps(task) for task in tasks]

for task in task_documents:
    print(f"{task}")

# Initialize embedding model
embedding = langchain_openai.OpenAIEmbeddings()

# Create FAISS index from documents
rag_store = langchain_community.vectorstores.FAISS.from_texts(task_documents, embedding)

# Save the FAISS index to disk for later use
rag_store.save_local("task_index")
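
The recommend_exercise_tool imported by agent.py presumably reads this index back; that tool is not shown in this diff. A minimal loading sketch, assuming the same embedding model and the langchain-community FAISS API:

from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

embedding = OpenAIEmbeddings()
# Recent langchain-community releases require an explicit opt-in to pickle
# deserialization when loading a locally saved index you created yourself.
rag_store = FAISS.load_local("task_index", embedding, allow_dangerous_deserialization=True)
matches = rag_store.similarity_search("past tense exercises", k=3)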
62 changes: 62 additions & 0 deletions server/src/aitutor/lap_service.py
@@ -0,0 +1,62 @@
import requests

from aitutor.models import Task
from app_settings import settings

def fetch_tasks(offset=0, limit=10, tenant_ids=None, task_types=None):
    """
    Fetch tasks using the native LAP API.
    :param offset: Where to start.
    :param limit: How many tasks to fetch.
    :param tenant_ids: Tenants from which to fetch tasks.
    :param task_types: Task types to fetch; defaults to OPEN.
    :return: List of tasks in JSON format, or None if the request fails.
    """
    if task_types is None:
        task_types = ["OPEN"]
    if tenant_ids is None:
        tenant_ids = [128, 157]
    task_list_url = f"{settings.lap_host}/api/task/filtered"
    data = {
        "tenantIds": tenant_ids,
        "taskTypeList": task_types,
        "offset": offset,
        "limit": limit,
    }
    response = requests.post(task_list_url, headers=headers(), json=data)
    if response.status_code == 200:
        return response.json()
    print(f"Request failed with status code {response.status_code}: {response.text}")
    return None


def feedback(task: Task, answer: str, student_id: str):
    """
    Call the public Feedback API: https://developers.taskbase.com/apis/specification/feedback/computefeedbackbytaskid
    :param task: The task the student was challenged with.
    :param answer: The student's answer.
    :param student_id: The identifier of the student.
    :return: The feedback object in JSON format, or None if the request fails.
    """
    feedback_api_url = f"{settings.lap_api_host}/tasks/{task.id}/feedback/compute"
    data = {
        "userId": student_id,
        "tenantId": task.tenant_id,
        "taskType": "FREEFORM_TEXT",
        "answer": {
            "content": answer
        },
    }
    response = requests.post(feedback_api_url, headers=headers(), json=data)
    if response.status_code == 200:
        return response.json()
    print(f"Request failed with status code {response.status_code}: {response.text}")
    return None


def headers():
    return {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {settings.lap_token}",
    }
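
A minimal usage sketch, assuming settings.lap_host and settings.lap_token are configured; every id and field value below is a placeholder, not real data:

from aitutor.models import Task

tasks = fetch_tasks(limit=5)  # first five OPEN tasks from the default tenants

example_task = Task(
    id="task-123",  # placeholder task id
    title="Translate the sentence",
    description="Translate 'Guten Morgen' into English.",
    sample_solutions=["Good morning"],
    tenant_id=128,
    language="de",
)
result = feedback(example_task, answer="Good morning", student_id="student-1")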
22 changes: 22 additions & 0 deletions server/src/aitutor/models.py
@@ -0,0 +1,22 @@
from pydantic import BaseModel, Field


class Task(BaseModel):
    id: str = Field(description="Id of the task.")
    title: str = Field(description="Title of the task.")
    description: str = Field(description="Description of the task.")
    sample_solutions: list[str] = Field(description="The sample solutions for the task.")
    tenant_id: int = Field(description="Tenant of the task.")
    language: str = Field(description="Language of the task.")


class Criteria(BaseModel):
    isCorrect: bool
    message: str
    aspect: str
    description: str
    context: str


class Feedback(BaseModel):
    criteria: list[Criteria]
Empty file.
42 changes: 42 additions & 0 deletions server/src/aitutor/tools/feedback_tool.py
@@ -0,0 +1,42 @@
from langchain_core.tools import tool

from aitutor.models import Task, Feedback, Criteria
from aitutor.lap_service import feedback


@tool(parse_docstring=True)
def get_feedback_tool(
    task: Task,
    answer: str,
    student_id: str,
) -> Feedback:
    """A feedback generation engine providing students with helpful feedback.

    Useful for evaluating student answers in exercises.

    Args:
        task (Task): The task the student was challenged with.
        answer (str): The answer the student provided.
        student_id (str): The student identifier.

    Returns:
        Feedback: The feedback for the student's answer, broken down into criteria.
    """
    feedback_result = feedback(
        task=task,
        answer=answer,
        student_id=student_id,
    )
    return to_feedback(feedback_result)


def to_feedback(feedback_result) -> Feedback:
    """Convert the raw LAP feedback payload into a Feedback model."""
    criteria_list = [
        Criteria(
            isCorrect=item["correctness"] == "CORRECT",
            message=item["message"],
            aspect=item["aspects"][0]["name"],
            description=item["aspects"][0]["description"],
            context=item["context"][0]["content"],
        )
        for item in feedback_result["result"]["feedback"]
    ]
    return Feedback(criteria=criteria_list)
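
A quick sanity check for to_feedback, assuming the LAP response is shaped the way the parser expects (all values below are hypothetical):

raw = {
    "result": {
        "feedback": [
            {
                "correctness": "CORRECT",
                "message": "Well done.",
                "aspects": [{"name": "grammar", "description": "Correct verb form."}],
                "context": [{"content": "Good morning"}],
            }
        ]
    }
}
print(to_feedback(raw))  # Feedback(criteria=[Criteria(isCorrect=True, ...)])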