2 changes: 2 additions & 0 deletions apps/web/.env.example
@@ -39,6 +39,8 @@ DEFAULT_LLM_MODEL=
# BEDROCK_REGION=us-west-2
# OLLAMA_BASE_URL=http://localhost:11434/api
# NEXT_PUBLIC_OLLAMA_MODEL=phi3
# LM_STUDIO_BASE_URL=http://localhost:1234/v1
# NEXT_PUBLIC_LM_STUDIO_MODEL=qwen/qwen3-vl-4b

# Economy LLM configuration (for large context windows where cost efficiency matters)
ECONOMY_LLM_PROVIDER=
4 changes: 4 additions & 0 deletions apps/web/env.ts
@@ -11,6 +11,7 @@ const llmProviderEnum = z.enum([
"groq",
"aigateway",
"ollama",
"lmstudio",
]);

export const env = createEnv({
@@ -57,6 +58,7 @@ export const env = createEnv({
OPENROUTER_API_KEY: z.string().optional(),
AI_GATEWAY_API_KEY: z.string().optional(),
OLLAMA_BASE_URL: z.string().optional(),
LM_STUDIO_BASE_URL: z.string().optional(),

UPSTASH_REDIS_URL: z.string().optional(),
UPSTASH_REDIS_TOKEN: z.string().optional(),
@@ -177,6 +179,7 @@ export const env = createEnv({
.string()
.default("us.anthropic.claude-3-7-sonnet-20250219-v1:0"),
NEXT_PUBLIC_OLLAMA_MODEL: z.string().optional(),
NEXT_PUBLIC_LM_STUDIO_MODEL: z.string().optional(),
NEXT_PUBLIC_APP_HOME_PATH: z.string().default("/setup"),
NEXT_PUBLIC_DUB_REFER_DOMAIN: z.string().optional(),
NEXT_PUBLIC_DISABLE_REFERRAL_SIGNATURE: z.coerce
@@ -237,6 +240,7 @@ export const env = createEnv({
NEXT_PUBLIC_BEDROCK_SONNET_MODEL:
process.env.NEXT_PUBLIC_BEDROCK_SONNET_MODEL,
NEXT_PUBLIC_OLLAMA_MODEL: process.env.NEXT_PUBLIC_OLLAMA_MODEL,
NEXT_PUBLIC_LM_STUDIO_MODEL: process.env.NEXT_PUBLIC_LM_STUDIO_MODEL,
NEXT_PUBLIC_APP_HOME_PATH: process.env.NEXT_PUBLIC_APP_HOME_PATH,
NEXT_PUBLIC_DUB_REFER_DOMAIN: process.env.NEXT_PUBLIC_DUB_REFER_DOMAIN,
NEXT_PUBLIC_DISABLE_REFERRAL_SIGNATURE:
1 change: 1 addition & 0 deletions apps/web/package.json
@@ -20,6 +20,7 @@
"@ai-sdk/google": "2.0.23",
"@ai-sdk/groq": "2.0.24",
"@ai-sdk/openai": "2.0.53",
"@ai-sdk/openai-compatible": "^1.0.27",
"@ai-sdk/provider": "2.0.0",
"@ai-sdk/react": "2.0.76",
"@asteasolutions/zod-to-openapi": "8.1.0",
2 changes: 2 additions & 0 deletions apps/web/utils/llms/config.ts
@@ -11,6 +11,7 @@ export const Provider = {
GROQ: "groq",
OPENROUTER: "openrouter",
AI_GATEWAY: "aigateway",
LM_STUDIO: "lmstudio",
@cubic-dev-ai (bot, Contributor) commented Nov 19, 2025:
Adding Provider.LM_STUDIO without also surfacing it in providerOptions leaves LM Studio impossible to select, so the new provider is effectively unusable.

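To make the new provider selectable, the reviewer's point could be addressed along these lines. This is a hypothetical sketch, assuming providerOptions is built from { label, value } pairs with Ollama conditionally included, the same way the Provider object does it; the actual shape of providerOptions is not shown in this diff:

export const providerOptions: { label: string; value: string }[] = [
  // ...existing entries (Anthropic, OpenAI, Google, Groq, OpenRouter, AI Gateway)...
  // Only offer LM Studio when a model has been configured for it (assumption).
  ...(env.NEXT_PUBLIC_LM_STUDIO_MODEL
    ? [{ label: "LM Studio", value: Provider.LM_STUDIO }]
    : []),
  ...(supportsOllama ? [{ label: "Ollama", value: Provider.OLLAMA }] : []),
];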

...(supportsOllama ? { OLLAMA: "ollama" } : {}),
};

@@ -31,6 +32,7 @@ export const Model = {
GEMINI_2_5_PRO_OPENROUTER: "google/gemini-2.5-pro",
GROQ_LLAMA_3_3_70B: "llama-3.3-70b-versatile",
KIMI_K2_OPENROUTER: "moonshotai/kimi-k2",
LM_STUDIO: env.NEXT_PUBLIC_LM_STUDIO_MODEL,
...(supportsOllama ? { OLLAMA: env.NEXT_PUBLIC_OLLAMA_MODEL } : {}),
};

28 changes: 28 additions & 0 deletions apps/web/utils/llms/model.test.ts
@@ -3,6 +3,7 @@ import { getModel } from "./model";
import { Provider, Model } from "./config";
import { env } from "@/env";
import type { UserAIFields } from "./types";
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";

// Mock AI provider imports
vi.mock("@ai-sdk/openai", () => ({
@@ -35,6 +36,10 @@ vi.mock("ollama-ai-provider", () => ({
createOllama: vi.fn(() => (model: string) => ({ model })),
}));

vi.mock("@ai-sdk/openai-compatible", () => ({
createOpenAICompatible: vi.fn(() => (model: string) => ({ model })),
}));

vi.mock("@/env", () => ({
env: {
DEFAULT_LLM_PROVIDER: "openai",
@@ -56,6 +61,8 @@ vi.mock("@/env", () => ({
BEDROCK_ACCESS_KEY: "",
BEDROCK_SECRET_KEY: "",
NEXT_PUBLIC_BEDROCK_SONNET_MODEL: "anthropic.claude-3-sonnet-20240229-v1:0",
LM_STUDIO_BASE_URL: "http://localhost:1234/v1",
NEXT_PUBLIC_LM_STUDIO_MODEL: "qwen/qwen3-vl-4b",
},
}));

@@ -164,6 +171,27 @@ describe("Models", () => {
// expect(result.model).toBeDefined();
// });

it("should configure LM Studio model correctly", () => {
const userAi: UserAIFields = {
aiApiKey: "user-api-key",
aiProvider: Provider.LM_STUDIO,
aiModel: Model.LM_STUDIO!,
};

const result = getModel(userAi);

expect(result.provider).toBe(Provider.LM_STUDIO);
expect(result.modelName).toBe(Model.LM_STUDIO);

expect(createOpenAICompatible).toHaveBeenCalledWith({
name: Provider.LM_STUDIO,
baseURL: env.LM_STUDIO_BASE_URL!,
supportsStructuredOutputs: true,
});

expect(result.model).toBeDefined();
});
Comment on lines +174 to +193 (bot, Contributor):

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain
Verify that LM Studio supports structured outputs.

The test correctly validates that createOpenAICompatible is called with supportsStructuredOutputs: true (line 189). However, not all OpenAI-compatible APIs support structured outputs.

Please verify using LM Studio documentation or by searching the web whether LM Studio's OpenAI-compatible API supports the structured outputs feature.


🌐 Web query:

Does LM Studio OpenAI-compatible API support structured outputs?

💡 Result:

Yes. LM Studio's OpenAI-compatible API supports Structured Outputs: you can pass response_format: { type: "json_schema", json_schema: ... } to /v1/chat/completions and the model will return JSON conforming to the schema (output appears as a JSON string in choices[0].message.content). Note: not all small models (<7B) support structured output — check the model card. [1][2]

Sources:

  • LM Studio docs — Structured Output. [1]
  • OpenAI — Introducing Structured Outputs in the API. [2]
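For illustration, a minimal request sketch matching the quoted result. The endpoint and response_format shape come from the docs cited above; the port and model name mirror this PR's example configuration and may differ per setup:

const res = await fetch("http://localhost:1234/v1/chat/completions", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: "qwen/qwen3-vl-4b",
    messages: [{ role: "user", content: "Describe a user as JSON." }],
    response_format: {
      type: "json_schema",
      json_schema: {
        name: "user",
        schema: {
          type: "object",
          properties: { name: { type: "string" }, age: { type: "number" } },
          required: ["name", "age"],
        },
      },
    },
  }),
});
const data = await res.json();
// The schema-conforming JSON arrives as a string in message.content.
console.log(JSON.parse(data.choices[0].message.content));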

Update test to account for model size limitations on structured outputs support.

LM Studio's OpenAI-compatible API does support Structured Outputs. However, not all small models (<7B) support structured output—check the model card.

The test hardcodes supportsStructuredOutputs: true without validating that Model.LM_STUDIO actually supports this feature. If a smaller model is used, this could cause runtime failures when structured outputs are requested. Either validate the model size before enabling this flag, or document that LM Studio configuration requires a model >=7B.
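One way to address this, sketched under the assumption of a new LM_STUDIO_STRUCTURED_OUTPUTS environment variable (hypothetical, not part of this PR), would be to make the flag configurable rather than hardcoded:

// Hypothetical opt-out: default structured outputs on, but let deployments
// running small (<7B) models disable the flag via env configuration.
const supportsStructuredOutputs =
  env.LM_STUDIO_STRUCTURED_OUTPUTS !== "false";

const lmstudio = createOpenAICompatible({
  name: Provider.LM_STUDIO,
  baseURL: env.LM_STUDIO_BASE_URL,
  supportsStructuredOutputs,
});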


it("should configure Anthropic model correctly without Bedrock credentials", () => {
const userAi: UserAIFields = {
aiApiKey: "user-api-key",
49 changes: 48 additions & 1 deletion apps/web/utils/llms/model.ts
@@ -6,6 +6,7 @@ import { createGoogleGenerativeAI } from "@ai-sdk/google";
import { createGroq } from "@ai-sdk/groq";
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
import { createGateway } from "@ai-sdk/gateway";
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
// import { createOllama } from "ollama-ai-provider";
import { env } from "@/env";
import { Model, Provider } from "@/utils/llms/config";
@@ -139,6 +140,34 @@ function selectModel(
// model: createOllama({ baseURL: env.OLLAMA_BASE_URL })(model),
// };
}
case Provider.LM_STUDIO: {
const modelName = aiModel || Model.LM_STUDIO;

if (!modelName) {
throw new Error(
"LM Studio model not configured. Please set NEXT_PUBLIC_LM_STUDIO_MODEL environment variable.",
);
}

if (!env.LM_STUDIO_BASE_URL) {
throw new Error(
"LM Studio base URL not configured. Please set LM_STUDIO_BASE_URL environment variable.",
);
}

const lmstudio = createOpenAICompatible({
name: Provider.LM_STUDIO,
baseURL: env.LM_STUDIO_BASE_URL,
supportsStructuredOutputs: true,
});

return {
provider: Provider.LM_STUDIO,
modelName,
model: lmstudio(modelName),
backupModel: null,
};
}

// this is messy. better to have two providers. one for bedrock and one for anthropic
case Provider.ANTHROPIC: {
@@ -211,7 +240,17 @@ function createOpenRouterProviderOptions(
*/
function selectEconomyModel(userAi: UserAIFields): SelectModel {
if (env.ECONOMY_LLM_PROVIDER && env.ECONOMY_LLM_MODEL) {
const isLMStudio = env.ECONOMY_LLM_PROVIDER === Provider.LM_STUDIO;
if (isLMStudio) {
return selectModel({
aiProvider: Provider.LM_STUDIO,
aiModel: env.ECONOMY_LLM_MODEL,
aiApiKey: null,
});
}

Comment on lines +243 to +251 (Owner):

Seems inconsistent? And would it ignore the user settings if those are set?

Same in the other place you did this.

@ppranay20 (Contributor, Author) replied Nov 19, 2025:
If I don't add this, the flow falls through to the default function (LM Studio doesn't use an API key; it provides a URL instead). The default function then selects whichever model is configured as the default, so if the economy provider is set to LM Studio it would revert to the default model.

Alternatively, I could extend the check to also look for the LM Studio URL, e.g.:

if (!apiKey && !url) {
  logger.warn("Economy LLM provider configured but API key not found", {
    provider: env.ECONOMY_LLM_PROVIDER,
  });
  return selectDefaultModel(userAi);
}

That would also work.
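A hedged sketch of that alternative, generalized into a hypothetical helper (hasProviderCredentials is illustrative, not code from this PR):

// Treat URL-based providers as "credentialed" when their base URL is set,
// so economy/chat selection doesn't fall back to the default model just
// because the provider is keyless.
function hasProviderCredentials(provider: string): boolean {
  if (provider === Provider.LM_STUDIO) return !!env.LM_STUDIO_BASE_URL;
  if (provider === Provider.OLLAMA) return !!env.OLLAMA_BASE_URL;
  return !!getProviderApiKey(provider);
}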

const apiKey = getProviderApiKey(env.ECONOMY_LLM_PROVIDER);

if (!apiKey) {
logger.warn("Economy LLM provider configured but API key not found", {
provider: env.ECONOMY_LLM_PROVIDER,
@@ -248,6 +287,15 @@
*/
function selectChatModel(userAi: UserAIFields): SelectModel {
if (env.CHAT_LLM_PROVIDER && env.CHAT_LLM_MODEL) {
const isLMStudio = env.CHAT_LLM_PROVIDER === Provider.LM_STUDIO;
if (isLMStudio) {
return selectModel({
aiProvider: Provider.LM_STUDIO,
aiModel: env.CHAT_LLM_MODEL,
aiApiKey: null,
});
}

const apiKey = getProviderApiKey(env.CHAT_LLM_PROVIDER);
if (!apiKey) {
logger.warn("Chat LLM provider configured but API key not found", {
@@ -301,7 +349,6 @@ function selectDefaultModel(userAi: UserAIFields): SelectModel {
const openRouterOptions = createOpenRouterProviderOptions(
env.DEFAULT_OPENROUTER_PROVIDERS || "",
);

// Preserve any custom options set earlier; always ensure reasoning exists.
const existingOpenRouterOptions = providerOptions.openrouter || {};
providerOptions.openrouter = {