-
Notifications
You must be signed in to change notification settings - Fork 1.1k
Add LM Studio Provider #988
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -3,6 +3,7 @@ import { getModel } from "./model"; | |
| import { Provider, Model } from "./config"; | ||
| import { env } from "@/env"; | ||
| import type { UserAIFields } from "./types"; | ||
| import { createOpenAICompatible } from "@ai-sdk/openai-compatible"; | ||
|
|
||
| // Mock AI provider imports | ||
| vi.mock("@ai-sdk/openai", () => ({ | ||
|
|
@@ -35,6 +36,10 @@ vi.mock("ollama-ai-provider", () => ({ | |
| createOllama: vi.fn(() => (model: string) => ({ model })), | ||
| })); | ||
|
|
||
| vi.mock("@ai-sdk/openai-compatible", () => ({ | ||
| createOpenAICompatible: vi.fn(() => (model: string) => ({ model })), | ||
| })); | ||
|
|
||
| vi.mock("@/env", () => ({ | ||
| env: { | ||
| DEFAULT_LLM_PROVIDER: "openai", | ||
|
|
@@ -56,6 +61,8 @@ vi.mock("@/env", () => ({ | |
| BEDROCK_ACCESS_KEY: "", | ||
| BEDROCK_SECRET_KEY: "", | ||
| NEXT_PUBLIC_BEDROCK_SONNET_MODEL: "anthropic.claude-3-sonnet-20240229-v1:0", | ||
| LM_STUDIO_BASE_URL: "http://localhost:1234/v1", | ||
| NEXT_PUBLIC_LM_STUDIO_MODEL: "qwen/qwen3-vl-4b", | ||
| }, | ||
| })); | ||
|
|
||
|
|
@@ -164,6 +171,27 @@ describe("Models", () => { | |
| // expect(result.model).toBeDefined(); | ||
| // }); | ||
|
|
||
| it("should configure LM Studio model correctly", () => { | ||
| const userAi: UserAIFields = { | ||
| aiApiKey: "user-api-key", | ||
| aiProvider: Provider.LM_STUDIO, | ||
| aiModel: Model.LM_STUDIO!, | ||
| }; | ||
|
|
||
| const result = getModel(userAi); | ||
|
|
||
| expect(result.provider).toBe(Provider.LM_STUDIO); | ||
| expect(result.modelName).toBe(Model.LM_STUDIO); | ||
|
|
||
| expect(createOpenAICompatible).toHaveBeenCalledWith({ | ||
| name: Provider.LM_STUDIO, | ||
| baseURL: env.LM_STUDIO_BASE_URL!, | ||
| supportsStructuredOutputs: true, | ||
| }); | ||
|
|
||
| expect(result.model).toBeDefined(); | ||
| }); | ||
|
Comment on lines
+174
to
+193
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. 🧩 Analysis chain: Verify that LM Studio supports structured outputs. The test correctly validates the provider configuration, but please verify using LM Studio documentation or by searching the web whether LM Studio's OpenAI-compatible API supports the structured outputs feature. 🌐 Web query: 💡 Result: Yes. LM Studio's OpenAI-compatible API supports Structured Outputs: you can pass response_format: { type: "json_schema", json_schema: ... } to /v1/chat/completions and the model will return JSON conforming to the schema (the output appears as a JSON string in choices[0].message.content). Note: not all small models (<7B) support structured output — check the model card. [1][2] Sources:
Update test to account for model size limitations on structured outputs support. LM Studio's OpenAI-compatible API does support Structured Outputs. However, not all small models (<7B) support structured output — check the model card. The test hardcodes `qwen/qwen3-vl-4b` (a 4B model) alongside `supportsStructuredOutputs: true`, so the configured default model may not actually support this feature — verify against the model card before relying on it. |
||
|
|
||
| it("should configure Anthropic model correctly without Bedrock credentials", () => { | ||
| const userAi: UserAIFields = { | ||
| aiApiKey: "user-api-key", | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -6,6 +6,7 @@ import { createGoogleGenerativeAI } from "@ai-sdk/google"; | |
| import { createGroq } from "@ai-sdk/groq"; | ||
| import { createOpenRouter } from "@openrouter/ai-sdk-provider"; | ||
| import { createGateway } from "@ai-sdk/gateway"; | ||
| import { createOpenAICompatible } from "@ai-sdk/openai-compatible"; | ||
| // import { createOllama } from "ollama-ai-provider"; | ||
| import { env } from "@/env"; | ||
| import { Model, Provider } from "@/utils/llms/config"; | ||
|
|
@@ -139,6 +140,34 @@ function selectModel( | |
| // model: createOllama({ baseURL: env.OLLAMA_BASE_URL })(model), | ||
| // }; | ||
| } | ||
| case Provider.LM_STUDIO: { | ||
| const modelName = aiModel || Model.LM_STUDIO; | ||
|
|
||
| if (!modelName) { | ||
| throw new Error( | ||
| "LM Studio model not configured. Please set NEXT_PUBLIC_LM_STUDIO_MODEL environment variable.", | ||
| ); | ||
| } | ||
|
|
||
| if (!env.LM_STUDIO_BASE_URL) { | ||
| throw new Error( | ||
| "LM Studio base URL not configured. Please set LM_STUDIO_BASE_URL environment variable.", | ||
| ); | ||
| } | ||
|
|
||
| const lmstudio = createOpenAICompatible({ | ||
| name: Provider.LM_STUDIO, | ||
| baseURL: env.LM_STUDIO_BASE_URL, | ||
| supportsStructuredOutputs: true, | ||
| }); | ||
|
|
||
| return { | ||
| provider: Provider.LM_STUDIO, | ||
| modelName, | ||
| model: lmstudio(modelName), | ||
| backupModel: null, | ||
| }; | ||
| } | ||
|
|
||
| // this is messy. better to have two providers. one for bedrock and one for anthropic | ||
| case Provider.ANTHROPIC: { | ||
|
|
@@ -211,7 +240,17 @@ function createOpenRouterProviderOptions( | |
| */ | ||
| function selectEconomyModel(userAi: UserAIFields): SelectModel { | ||
| if (env.ECONOMY_LLM_PROVIDER && env.ECONOMY_LLM_MODEL) { | ||
| const isLMStudio = env.ECONOMY_LLM_PROVIDER === Provider.LM_STUDIO; | ||
| if (isLMStudio) { | ||
| return selectModel({ | ||
| aiProvider: Provider.LM_STUDIO, | ||
| aiModel: env.ECONOMY_LLM_MODEL, | ||
| aiApiKey: null, | ||
| }); | ||
| } | ||
|
|
||
|
Comment on lines
+243
to
+251
Owner
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Seems inconsistent? And would ignore the user settings if those are set? Same in the other plcae you did this
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If I don't add this then it will direct to default function (because LM studio does not have API key it provides URL). The default function selects the model which is set to default. So, if economy is set to LM studio it will revert to default model. I can also add a check that for LM studio URL check like This will also work. |
||
| const apiKey = getProviderApiKey(env.ECONOMY_LLM_PROVIDER); | ||
|
|
||
| if (!apiKey) { | ||
| logger.warn("Economy LLM provider configured but API key not found", { | ||
| provider: env.ECONOMY_LLM_PROVIDER, | ||
|
|
@@ -248,6 +287,15 @@ function selectEconomyModel(userAi: UserAIFields): SelectModel { | |
| */ | ||
| function selectChatModel(userAi: UserAIFields): SelectModel { | ||
| if (env.CHAT_LLM_PROVIDER && env.CHAT_LLM_MODEL) { | ||
| const isLMStudio = env.CHAT_LLM_PROVIDER === Provider.LM_STUDIO; | ||
| if (isLMStudio) { | ||
| return selectModel({ | ||
| aiProvider: Provider.LM_STUDIO, | ||
| aiModel: env.CHAT_LLM_MODEL, | ||
| aiApiKey: null, | ||
| }); | ||
| } | ||
|
|
||
| const apiKey = getProviderApiKey(env.CHAT_LLM_PROVIDER); | ||
| if (!apiKey) { | ||
| logger.warn("Chat LLM provider configured but API key not found", { | ||
|
|
@@ -301,7 +349,6 @@ function selectDefaultModel(userAi: UserAIFields): SelectModel { | |
| const openRouterOptions = createOpenRouterProviderOptions( | ||
| env.DEFAULT_OPENROUTER_PROVIDERS || "", | ||
| ); | ||
|
|
||
| // Preserve any custom options set earlier; always ensure reasoning exists. | ||
| const existingOpenRouterOptions = providerOptions.openrouter || {}; | ||
| providerOptions.openrouter = { | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Adding
`Provider.LM_STUDIO` without also surfacing it in `providerOptions` leaves LM Studio impossible to select, so the new provider is effectively unusable.

Prompt for AI agents