
Commit

update to use Claude 3.5 Haiku (#408)
vbarda authored Jan 8, 2025
1 parent c3091ab commit eddb98c
Showing 5 changed files with 9 additions and 11 deletions.
4 changes: 2 additions & 2 deletions backend/retrieval_graph/configuration.py
@@ -15,14 +15,14 @@ class AgentConfiguration(BaseConfiguration):
     # models
 
     query_model: str = field(
-        default="openai/gpt-4o-mini",
+        default="anthropic/claude-3-5-haiku-20241022",
         metadata={
            "description": "The language model used for processing and refining queries. Should be in the form: provider/model-name."
         },
     )
 
     response_model: str = field(
-        default="openai/gpt-4o-mini",
+        default="anthropic/claude-3-5-haiku-20241022",
         metadata={
            "description": "The language model used for generating responses. Should be in the form: provider/model-name."
         },
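Both defaults follow the provider/model-name convention described in the field metadata. A minimal sketch of how such a string could be resolved into a chat model, assuming a helper that splits on the first "/" and delegates to langchain's init_chat_model (the repository's actual load_chat_model in backend/utils.py, used later in this commit, may be implemented differently):

from langchain.chat_models import init_chat_model
from langchain_core.language_models import BaseChatModel


def load_chat_model(fully_specified_name: str) -> BaseChatModel:
    """Resolve a 'provider/model-name' string into a chat model instance.

    Illustrative sketch only; the real backend.utils.load_chat_model may differ.
    """
    provider, _, model = fully_specified_name.partition("/")
    return init_chat_model(model, model_provider=provider)


# For example, the new default from this file:
query_model = load_chat_model("anthropic/claude-3-5-haiku-20241022")
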
8 changes: 3 additions & 5 deletions backend/tests/evals/test_e2e.py
@@ -5,13 +5,12 @@
 from langchain_core.documents import Document
 from langchain_core.messages import AIMessage
 from langchain_core.prompts import ChatPromptTemplate
-from langchain_openai import ChatOpenAI
 from langsmith.evaluation import EvaluationResults, aevaluate
 from langsmith.schemas import Example, Run
 from pydantic import BaseModel, Field
 
 from backend.retrieval_graph.graph import graph
-from backend.utils import format_docs
+from backend.utils import format_docs, load_chat_model
 
 DATASET_NAME = "chat-langchain-qa"
 EXPERIMENT_PREFIX = "chat-langchain-ci"
@@ -20,10 +19,9 @@
 SCORE_ANSWER_CORRECTNESS = "answer_correctness_score"
 SCORE_ANSWER_VS_CONTEXT_CORRECTNESS = "answer_vs_context_correctness_score"
 
-# claude sonnet / gpt-4o are a bit too expensive
-JUDGE_MODEL_NAME = "gpt-4o-mini"
+JUDGE_MODEL_NAME = "anthropic/claude-3-5-haiku-20241022"
 
-judge_llm = ChatOpenAI(model_name=JUDGE_MODEL_NAME)
+judge_llm = load_chat_model(JUDGE_MODEL_NAME)
 
 
 # Evaluate retrieval
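With the judge built through load_chat_model, the test can point JUDGE_MODEL_NAME at any provider/model-name string. A hedged sketch of how such a judge is commonly wired into a structured-output grader; the schema and prompt below are illustrative, not the ones actually used in test_e2e.py:

from langchain_core.prompts import ChatPromptTemplate
from pydantic import BaseModel, Field

from backend.utils import load_chat_model

JUDGE_MODEL_NAME = "anthropic/claude-3-5-haiku-20241022"
judge_llm = load_chat_model(JUDGE_MODEL_NAME)


class GradeAnswer(BaseModel):
    """Illustrative grading schema (not the test file's actual one)."""

    score: float = Field(description="Correctness score between 0 and 1")


grader_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Grade the answer against the reference. Return a score in [0, 1]."),
        ("human", "Question: {question}\nReference: {reference}\nAnswer: {answer}"),
    ]
)
# with_structured_output parses the judge's response into a GradeAnswer object
grader = grader_prompt | judge_llm.with_structured_output(GradeAnswer)
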
4 changes: 2 additions & 2 deletions frontend/app/components/SelectModel.tsx
@@ -10,8 +10,8 @@ import {
 } from "./ui/select";
 
 const modelOptionsAndLabels: Partial<Record<ModelOptions, string>> = {
+  "anthropic/claude-3-5-haiku-20241022": "Claude 3.5 Haiku",
   "openai/gpt-4o-mini": "GPT 4o Mini",
-  "anthropic/claude-3-haiku-20240307": "Claude 3 Haiku",
   // "groq/llama3-70b-8192": "Llama3 70b (Groq)",
   "google_genai/gemini-pro": "Gemini Pro",
 };
@@ -24,7 +24,7 @@ export function SelectModelComponent() {
     <Select
       onValueChange={(v) => setSelectedModel(v as ModelOptions)}
       value={selectedModel}
-      defaultValue="openai/gpt-4o-mini"
+      defaultValue="anthropic/claude-3-5-haiku-20241022"
     >
       <SelectTrigger className="w-[180px] border-gray-600 text-gray-200">
         <SelectValue placeholder="Model" />
2 changes: 1 addition & 1 deletion frontend/app/contexts/GraphContext.tsx
@@ -62,7 +62,7 @@ export function GraphProvider({ children }: { children: ReactNode }) {
   const { shareRun } = useRuns();
   const [messages, setMessages] = useState<BaseMessage[]>([]);
   const [selectedModel, setSelectedModel] =
-    useState<ModelOptions>("openai/gpt-4o-mini");
+    useState<ModelOptions>("anthropic/claude-3-5-haiku-20241022");
 
   const streamMessage = async (params: GraphInput): Promise<void> => {
     if (!threadId) {
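On the backend, the selected model string ends up in AgentConfiguration through LangGraph's configurable config. A hypothetical direct invocation under that assumption (the key names come from configuration.py; the input format and the exact wiring between GraphContext.tsx and the graph are assumptions, not taken from this commit):

from backend.retrieval_graph.graph import graph

# Hypothetical invocation: forward the selected model string as configurable
# values that AgentConfiguration reads. The message format shown here is an
# assumption for illustration only.
result = graph.invoke(
    {"messages": [("user", "How do I use a LangChain retriever?")]},
    config={
        "configurable": {
            "query_model": "anthropic/claude-3-5-haiku-20241022",
            "response_model": "anthropic/claude-3-5-haiku-20241022",
        }
    },
)
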
2 changes: 1 addition & 1 deletion frontend/app/types.ts
@@ -21,6 +21,6 @@ export type Feedback = {
 
 export type ModelOptions =
   | "openai/gpt-4o-mini"
-  | "anthropic/claude-3-haiku-20240307"
+  | "anthropic/claude-3-5-haiku-20241022"
   | "groq/llama3-70b-8192"
   | "google_genai/gemini-pro";
