From c0e50de3045dc6b3b2e75bbc13b7cf042b38f50d Mon Sep 17 00:00:00 2001
From: whyXVI <139840508+whyXVI@users.noreply.github.com>
Date: Mon, 9 Sep 2024 16:12:05 +0800
Subject: [PATCH] Corrected liteLLM model providers

When I use the default setting and click on fast/powerful, the
following error message occurs:

    500: LLM Provider NOT provided. Pass in the LLM provider you are
    trying to call. You passed model=OpenAI/gpt-4o-mini
    Pass model as E.g. For 'Huggingface' inference endpoints pass in
    completion(model='huggingface/starcoder',..) Learn more:
    https://docs.litellm.ai/docs/providers
---
 src/backend/constants.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/backend/constants.py b/src/backend/constants.py
index c6144a6..abae5c3 100644
--- a/src/backend/constants.py
+++ b/src/backend/constants.py
@@ -23,8 +23,8 @@ class ChatModel(str, Enum):
 
 
 model_mappings: dict[ChatModel, str] = {
-    ChatModel.GPT_4o: "gpt-4o",
-    ChatModel.GPT_4o_mini: "gpt-4o-mini",
+    ChatModel.GPT_4o: "openai/gpt-4o",
+    ChatModel.GPT_4o_mini: "openai/gpt-4o-mini",
     ChatModel.LLAMA_3_70B: "groq/llama-3.1-70b-versatile",
     ChatModel.LOCAL_LLAMA_3: "ollama_chat/llama3.1",
     ChatModel.LOCAL_GEMMA: "ollama_chat/gemma",
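
Note: litellm resolves the provider from the prefix on the model string
(as the other entries in model_mappings already do with "groq/" and
"ollama_chat/"), which is why only the two OpenAI entries needed the
"openai/" prefix. A minimal sketch of the routing behaviour, not part
of the patch, assuming litellm is installed, OPENAI_API_KEY is set in
the environment, and an illustrative message payload:

    import litellm

    messages = [{"role": "user", "content": "Hello"}]

    # A prefix that is not a recognized provider key, such as the
    # wrongly cased "OpenAI/", cannot be mapped to a provider and
    # reproduces the 500 error quoted in the commit message.
    try:
        litellm.completion(model="OpenAI/gpt-4o-mini", messages=messages)
    except litellm.BadRequestError as err:
        print(err)  # "LLM Provider NOT provided. ..."

    # The lowercase "openai/" prefix routes the call to the OpenAI
    # provider, matching the corrected model_mappings entries above.
    response = litellm.completion(model="openai/gpt-4o-mini", messages=messages)
    print(response.choices[0].message.content)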