diff --git a/README.md b/README.md index 967ee1c83..c36a9b118 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ Intended use cases: - .env.example — UI env variables (VITE_API_BASE_URL, etc.) - Dockerfile — Builds static assets; serves via nginx with API upstream templating - docker/entrypoint.sh, docker/nginx.conf.template — Runtime nginx config - - llm/ — Internal library for LLM interactions (OpenAI client, zod). + - llm/ — Internal library for LLM interactions (LiteLLM-compatible OpenAI client, zod). - shared/ — Shared types/helpers for UI/Server. - json-schema-to-zod/ — Internal helper library. - docs/ — Platform documentation @@ -58,7 +58,7 @@ Intended use cases: - React 19, Vite 7, Tailwind CSS 4.1, Radix UI - Storybook 10 for component documentation - LLM: - - LiteLLM server (ghcr.io/berriai/litellm) or OpenAI (@langchain/* tooling) + - LiteLLM server (ghcr.io/berriai/litellm) providing adapters for upstream providers - Tooling: - pnpm 10.5 (corepack-enabled), Node.js 20 - Vitest 3 for testing; ESLint; Prettier @@ -98,8 +98,7 @@ pnpm install 2) Configure environments: - Server: copy packages/platform-server/.env.example to .env, then set: - AGENTS_DATABASE_URL (required) — e.g. postgresql://agents:agents@localhost:5443/agents - - LLM_PROVIDER — litellm or openai (no default) - - LITELLM_BASE_URL, LITELLM_MASTER_KEY (required for LiteLLM path) + - LITELLM_BASE_URL, LITELLM_MASTER_KEY (required for LiteLLM provisioning) - Optional: CORS_ORIGINS, VAULT_* (see packages/platform-server/src/core/services/config.service.ts and .env.example) - UI: copy packages/platform-ui/.env.example to .env and set: - VITE_API_BASE_URL — e.g. 
http://localhost:3010 @@ -135,11 +134,10 @@ Server listens on PORT (default 3010; see packages/platform-server/src/index.ts - Use published images from GHCR (see .github/workflows/docker-ghcr.yml): - ghcr.io/agynio/platform-server - ghcr.io/agynio/platform-ui - - Example: server (env must include AGENTS_DATABASE_URL, LLM_PROVIDER, LITELLM_BASE_URL, LITELLM_MASTER_KEY): + - Example: server (env must include AGENTS_DATABASE_URL, LITELLM_BASE_URL, LITELLM_MASTER_KEY): ```bash docker run --rm -p 3010:3010 \ -e AGENTS_DATABASE_URL=postgresql://agents:agents@host.docker.internal:5443/agents \ - -e LLM_PROVIDER=litellm \ -e LITELLM_BASE_URL=http://host.docker.internal:4000 \ -e LITELLM_MASTER_KEY=sk-dev-master-1234 \ ghcr.io/agynio/platform-server:latest @@ -156,11 +154,10 @@ docker run --rm -p 8080:80 \ Key environment variables (server) from packages/platform-server/.env.example and src/core/services/config.service.ts: - Required: - AGENTS_DATABASE_URL — Postgres connection for platform-server - - LLM_PROVIDER — litellm or openai - LITELLM_BASE_URL — LiteLLM root URL (must not include /v1; default host in docker-compose is 127.0.0.1:4000) - - LITELLM_MASTER_KEY — admin key for LiteLLM -- Optional LLM: - - OPENAI_API_KEY, OPENAI_BASE_URL + - LITELLM_MASTER_KEY — admin key for LiteLLM (virtual key alias `agyn_key` is provisioned automatically) +- Optional LiteLLM tuning: + - LITELLM_MODELS, LITELLM_KEY_DURATION, LITELLM_MAX_BUDGET, LITELLM_RPM_LIMIT, LITELLM_TPM_LIMIT, LITELLM_TEAM_ID - Graph store: - GRAPH_REPO_PATH (default ./data/graph) - GRAPH_BRANCH (default graph-state) @@ -176,6 +173,7 @@ Key environment variables (server) from packages/platform-server/.env.example an - NCPS_URL_SERVER, NCPS_URL_CONTAINER (default http://ncps:8501) - NCPS_PUBKEY_PATH (default /pubkey), fetch/refresh/backoff settings - NIX_ALLOWED_CHANNELS, NIX_* cache limits + - `/api/nix/resolve-repo` supports public GitHub repositories only; private repositories are not supported. 
- CORS: - CORS_ORIGINS — comma-separated allowed origins - Misc: @@ -259,7 +257,7 @@ pnpm --filter @agyn/platform-server run prisma:generate - Local compose: docker-compose.yml includes all supporting services required for dev workflows. - Server container: - Image: ghcr.io/agynio/platform-server - - Required env: AGENTS_DATABASE_URL, LLM_PROVIDER, LITELLM_BASE_URL, LITELLM_MASTER_KEY, optional Vault and CORS + - Required env: AGENTS_DATABASE_URL, LITELLM_BASE_URL, LITELLM_MASTER_KEY (optional Vault and CORS vars supported) - Exposes 3010; healthcheck verifies TCP connectivity - UI container: - Image: ghcr.io/agynio/platform-ui diff --git a/docs/contributing/style_guides.md b/docs/contributing/style_guides.md index 5ab8ffc5d..7dec4b95c 100644 --- a/docs/contributing/style_guides.md +++ b/docs/contributing/style_guides.md @@ -82,8 +82,8 @@ Our repo currently uses: ```ts // Bad: implicit any, unvalidated env, side effects in module scope -const key = process.env.OPENAI_API_KEY; // string | undefined -export const client = new OpenAI({ apiKey: key }); +const masterKey = process.env.LITELLM_MASTER_KEY; // string | undefined +export const client = new OpenAI({ apiKey: masterKey, baseURL: process.env.LITELLM_BASE_URL }); export function handle(data) { return data.id; @@ -94,7 +94,10 @@ export function handle(data) { // Good: validated config, explicit types, controlled side effects import { z } from 'zod'; -const Config = z.object({ OPENAI_API_KEY: z.string().min(1) }); +const Config = z.object({ + LITELLM_BASE_URL: z.string().url(), + LITELLM_MASTER_KEY: z.string().min(1), +}); const cfg = Config.parse(process.env); export interface Item { id: string } @@ -102,7 +105,10 @@ export function getId(item: Item): string { return item.id; } -export const client = new OpenAI({ apiKey: cfg.OPENAI_API_KEY }); +export const client = new OpenAI({ + apiKey: cfg.LITELLM_MASTER_KEY, + baseURL: `${cfg.LITELLM_BASE_URL.replace(/\/$/, '')}/v1`, +}); ``` ## Tooling diff --git 
a/docs/litellm-setup.md b/docs/litellm-setup.md index dcb59034e..7da5eb8df 100644 --- a/docs/litellm-setup.md +++ b/docs/litellm-setup.md @@ -21,26 +21,24 @@ Networking and ports - To expose on your LAN (not recommended without auth/TLS), edit docker-compose.yml and change the litellm ports mapping to either `0.0.0.0:4000:4000` or just `4000:4000`. Initial configuration (via UI) -- Create a provider key: add your real OpenAI (or other) API key under Providers. +- Create a provider key: add your real upstream API key (e.g., OpenAI, Anthropic, Azure OpenAI) under Providers. - Create a model alias if desired: - - Choose any name you prefer (e.g., gpt-5) and point it to a real backend model target (e.g., gpt-4o, gpt-4o-mini, or openai/gpt-4o). - - In the Agents UI, the Model field now accepts free-text. Enter either your alias name (e.g., gpt-5) or a provider-prefixed identifier (e.g., openai/gpt-4o-mini). The UI does not validate availability; runtime will surface errors if misconfigured. + - Choose any name you prefer (e.g., gpt-5) and point it to a real backend model target (e.g., openai/gpt-4o-mini). + - In the Agents UI, the Model field accepts free-text. Enter either your alias name (e.g., gpt-5) or a provider-prefixed identifier (e.g., openai/gpt-4o-mini). The UI does not validate availability; runtime surfaces errors if misconfigured. App configuration: LiteLLM admin requirements -- Set `LLM_PROVIDER=litellm` on the platform server. - LiteLLM administration env vars are required at boot: - `LITELLM_BASE_URL=http://localhost:4000` - `LITELLM_MASTER_KEY=sk-` -- The server provisions virtual keys by calling LiteLLM's admin API. Missing either env produces a `503 litellm_missing_config` response for the LLM settings API and disables UI writes. -- Optional overrides for generated virtual keys: +- The platform server always operates against LiteLLM. Missing either env produces a `503 litellm_missing_config` response for the LLM settings API and disables UI writes. 
+- Virtual keys are provisioned automatically under the fixed alias `agyn_key`. Optional overrides for generated keys: - `LITELLM_MODELS=gpt-5` (comma-separated list) - `LITELLM_KEY_DURATION=30d` - - `LITELLM_KEY_ALIAS=agents-${process.pid}` - Limits: `LITELLM_MAX_BUDGET`, `LITELLM_RPM_LIMIT`, `LITELLM_TPM_LIMIT`, `LITELLM_TEAM_ID` - Runtime requests use `${LITELLM_BASE_URL}/v1` with either the master key or the generated virtual key. Model naming guidance -- Use the exact LiteLLM model name as configured in the LiteLLM UI. For OpenAI via LiteLLM, provider prefixes may be required (e.g., openai/gpt-4o-mini). +- Use the exact LiteLLM model name as configured in the LiteLLM UI (e.g., openai/gpt-4o-mini). - Aliases are supported; enter the alias in the UI if you created one (e.g., gpt-5). - Provider identifiers should match the canonical keys exposed by LiteLLM's `/public/providers` endpoint. The platform normalizes a few historical aliases (for example, `azure_openai` now maps to `azure`), but using the official key avoids sync errors. - Provider names are handled case-insensitively and persisted as lowercase canonical keys. @@ -50,9 +48,6 @@ Agent configuration behavior - Agents respect the configured model end-to-end. If you set a model in the Agent configuration, the runtime binds that model to both the CallModel and Summarization nodes and will not silently fall back to the default (gpt-5). - Ensure the chosen model or alias exists in LiteLLM; misconfigured names will surface as runtime errors from the provider. -Direct OpenAI mode -- Set `LLM_PROVIDER=openai` and provide `OPENAI_API_KEY` (and optional `OPENAI_BASE_URL`). No LiteLLM envs are read in this mode. - Persistence verification - The LiteLLM DB persists to the named volume litellm_pgdata. - Stop and start services; your providers, virtual keys, and aliases should remain. 
diff --git a/docs/product-spec.md b/docs/product-spec.md index 556d567f5..b7d553943 100644 --- a/docs/product-spec.md +++ b/docs/product-spec.md @@ -102,14 +102,11 @@ Upgrade and migration Configuration matrix (server env vars) - Required - - GITHUB_APP_ID - - GITHUB_APP_PRIVATE_KEY (PEM; multiline ok) - - GITHUB_INSTALLATION_ID - - GH_TOKEN - - LLM_PROVIDER (litellm | openai) - - If `LLM_PROVIDER=litellm`: LITELLM_BASE_URL and LITELLM_MASTER_KEY - - If `LLM_PROVIDER=openai`: OPENAI_API_KEY (OPENAI_BASE_URL optional) + - AGENTS_DATABASE_URL + - LITELLM_BASE_URL (LiteLLM root without /v1) + - LITELLM_MASTER_KEY (admin key; virtual key alias `agyn_key` is managed automatically) - Optional + - GITHUB_APP_ID / GITHUB_APP_PRIVATE_KEY / GITHUB_INSTALLATION_ID / GH_TOKEN (only for GitHub App integrations) - GRAPH_REPO_PATH (default ./data/graph) - GRAPH_BRANCH (default graph-state) - GRAPH_AUTHOR_NAME / GRAPH_AUTHOR_EMAIL @@ -131,7 +128,7 @@ HTTP API and sockets (pointers) Runbooks - Local dev - Prereqs: Node 18+, pnpm, Docker, Postgres. - - Set: LLM_PROVIDER=litellm, LITELLM_BASE_URL, LITELLM_MASTER_KEY, GITHUB_*, GH_TOKEN, AGENTS_DATABASE_URL. Optional VAULT_* and DOCKER_MIRROR_URL. + - Set: AGENTS_DATABASE_URL, LITELLM_BASE_URL, LITELLM_MASTER_KEY. Optional: VAULT_*, DOCKER_MIRROR_URL, GitHub App env vars when integrations are enabled. - Start deps (compose or local Postgres) - Server: pnpm -w -F @agyn/platform-server dev - UI: pnpm -w -F @agyn/platform-ui dev diff --git a/packages/platform-server/.env.example b/packages/platform-server/.env.example index f0c2b3f91..9f0a7dae7 100644 --- a/packages/platform-server/.env.example +++ b/packages/platform-server/.env.example @@ -10,12 +10,16 @@ # Node id (nodeId) is required for deterministic upsert and is provided by the Agent node. AGENTS_DATABASE_URL= -# LLM provider must be explicit: 'openai' or 'litellm'. No default. 
-LLM_PROVIDER= - # LiteLLM admin setup (replace master key with your actual secret) LITELLM_BASE_URL=http://127.0.0.1:4000 LITELLM_MASTER_KEY=sk-dev-master-1234 +# Optional LiteLLM tuning (virtual key alias agyn_key is automatic) +# LITELLM_MODELS=gpt-4o +# LITELLM_KEY_DURATION=30d +# LITELLM_MAX_BUDGET=100 +# LITELLM_RPM_LIMIT=600 +# LITELLM_TPM_LIMIT=90000 +# LITELLM_TEAM_ID= # Optional: GitHub integration (App or PAT). Safe to omit for local dev. # GITHUB_APP_ID= diff --git a/packages/platform-server/README.md b/packages/platform-server/README.md index 7bf360f18..e5f598930 100644 --- a/packages/platform-server/README.md +++ b/packages/platform-server/README.md @@ -104,8 +104,7 @@ Persistent conversation state (Prisma) - `pnpm --filter @agyn/platform-server prisma studio` - Best-effort: if AGENTS_DATABASE_URL is not set or DB errors occur, reducers fall back to in-memory only. - Local dev: - - LLM_PROVIDER must be set explicitly to 'openai' or 'litellm'. There is no default. - - When `LLM_PROVIDER=litellm`, the server expects `LITELLM_BASE_URL` and `LITELLM_MASTER_KEY`. + - Provide `LITELLM_BASE_URL` and `LITELLM_MASTER_KEY` for LiteLLM administration. - In docker-compose development the admin base defaults to `http://127.0.0.1:4000` if unset. - For all other environments, set an explicit `LITELLM_BASE_URL` and master key. @@ -118,16 +117,13 @@ LITELLM_BASE_URL=http://127.0.0.1:4000 LITELLM_MASTER_KEY=sk-dev-master-1234 ``` -Replace `sk-dev-master-1234` with your actual LiteLLM master key if it differs. +Replace `sk-dev-master-1234` with your actual LiteLLM master key if it differs. The server provisions a virtual key using the fixed alias `agyn_key`; override TTL, allowed models, and rate limits via `LITELLM_KEY_DURATION`, `LITELLM_MODELS`, `LITELLM_MAX_BUDGET`, `LITELLM_RPM_LIMIT`, `LITELLM_TPM_LIMIT`, and `LITELLM_TEAM_ID`. ## Context item payload guard LiteLLM call logging, summarization, and tool execution persist context items as JSON blobs inside Postgres. 
The persistence layer now strips all `\u0000` (null bytes) from `contentText`, `contentJson`, and `metadata` prior to writes so Prisma does not reject the payload. -- Sanitization runs automatically for every `contextItem.create`/`update`. -- Enable a hard guard during development by setting `CONTEXT_ITEM_NULL_GUARD=1`. When the guard is active the server throws `ContextItemNullByteGuardError` if any unsanitized payload reaches the repository, ensuring new call sites cannot bypass the sanitizer. - -Set the flag while running targeted tests or during local debugging to immediately catch regressions that would otherwise surface as Prisma `null byte in string` errors at runtime. +- Sanitization runs automatically for every `contextItem.create`/`update`, and the null-byte guard is always enforced (no runtime toggle). - GitHub integration is optional. If no GitHub env is provided, the server boots and logs that GitHub is disabled. Any GitHub-dependent feature will error at runtime until credentials are configured. - Shell tool streaming persistence: - Tool stdout/stderr chunks are stored via Prisma when the `tool_output_*` tables exist. 
diff --git a/packages/platform-server/__e2e__/app.bootstrap.smoke.test.ts b/packages/platform-server/__e2e__/app.bootstrap.smoke.test.ts index ddf5c1ed6..994a8303c 100644 --- a/packages/platform-server/__e2e__/app.bootstrap.smoke.test.ts +++ b/packages/platform-server/__e2e__/app.bootstrap.smoke.test.ts @@ -24,7 +24,6 @@ import { LiveGraphRuntime } from '../src/graph-core/liveGraph.manager'; import { ConfigService, configSchema } from '../src/core/services/config.service'; import { LLMSettingsService } from '../src/settings/llm/llmSettings.service'; -process.env.LLM_PROVIDER = process.env.LLM_PROVIDER || 'litellm'; process.env.AGENTS_DATABASE_URL = process.env.AGENTS_DATABASE_URL || 'postgres://localhost:5432/test'; process.env.NCPS_ENABLED = process.env.NCPS_ENABLED || 'false'; process.env.CONTAINERS_CLEANUP_ENABLED = process.env.CONTAINERS_CLEANUP_ENABLED || 'false'; @@ -164,7 +163,6 @@ describe('App bootstrap smoke test', () => { const configService = new ConfigService().init( configSchema.parse({ - llmProvider: process.env.LLM_PROVIDER || 'litellm', litellmBaseUrl: process.env.LITELLM_BASE_URL || 'http://127.0.0.1:4000', litellmMasterKey: process.env.LITELLM_MASTER_KEY || 'sk-dev-master-1234', agentsDatabaseUrl: process.env.AGENTS_DATABASE_URL || 'postgres://localhost:5432/test', diff --git a/packages/platform-server/__e2e__/llmSettings.adminStatus.e2e.test.ts b/packages/platform-server/__e2e__/llmSettings.adminStatus.e2e.test.ts index 66a7adbaf..13f1393bf 100644 --- a/packages/platform-server/__e2e__/llmSettings.adminStatus.e2e.test.ts +++ b/packages/platform-server/__e2e__/llmSettings.adminStatus.e2e.test.ts @@ -11,14 +11,12 @@ import { ConfigService } from '../src/core/services/config.service'; describe('LLM settings controller (admin-status endpoint)', () => { let app: NestFastifyApplication; const previousEnv = { - llmProvider: process.env.LLM_PROVIDER, agentsDbUrl: process.env.AGENTS_DATABASE_URL, litellmBaseUrl: process.env.LITELLM_BASE_URL, 
litellmMasterKey: process.env.LITELLM_MASTER_KEY, }; beforeAll(async () => { - process.env.LLM_PROVIDER = 'litellm'; process.env.AGENTS_DATABASE_URL = 'postgres://localhost:5432/test'; process.env.LITELLM_BASE_URL = process.env.LITELLM_BASE_URL || 'http://127.0.0.1:4000'; process.env.LITELLM_MASTER_KEY = process.env.LITELLM_MASTER_KEY || 'sk-dev-master-1234'; @@ -38,7 +36,6 @@ describe('LLM settings controller (admin-status endpoint)', () => { afterAll(async () => { await app.close(); ConfigService.clearInstanceForTest(); - process.env.LLM_PROVIDER = previousEnv.llmProvider; process.env.AGENTS_DATABASE_URL = previousEnv.agentsDbUrl; process.env.LITELLM_BASE_URL = previousEnv.litellmBaseUrl; process.env.LITELLM_MASTER_KEY = previousEnv.litellmMasterKey; diff --git a/packages/platform-server/__e2e__/llmSettings.models.e2e.test.ts b/packages/platform-server/__e2e__/llmSettings.models.e2e.test.ts index dd0da50ac..01ad09403 100644 --- a/packages/platform-server/__e2e__/llmSettings.models.e2e.test.ts +++ b/packages/platform-server/__e2e__/llmSettings.models.e2e.test.ts @@ -11,14 +11,12 @@ import type { LiteLLMModelRecord } from '../src/settings/llm/types'; describe('LLM settings controller (models endpoint)', () => { let app: NestFastifyApplication; const previousEnv = { - llmProvider: process.env.LLM_PROVIDER, agentsDbUrl: process.env.AGENTS_DATABASE_URL, litellmBaseUrl: process.env.LITELLM_BASE_URL, litellmMasterKey: process.env.LITELLM_MASTER_KEY, }; beforeAll(async () => { - process.env.LLM_PROVIDER = 'litellm'; process.env.AGENTS_DATABASE_URL = 'postgres://localhost:5432/test'; process.env.LITELLM_BASE_URL = process.env.LITELLM_BASE_URL || 'http://127.0.0.1:4000'; process.env.LITELLM_MASTER_KEY = process.env.LITELLM_MASTER_KEY || 'sk-dev-master-1234'; @@ -39,7 +37,6 @@ describe('LLM settings controller (models endpoint)', () => { afterAll(async () => { await app.close(); ConfigService.clearInstanceForTest(); - process.env.LLM_PROVIDER = previousEnv.llmProvider; 
process.env.AGENTS_DATABASE_URL = previousEnv.agentsDbUrl; process.env.LITELLM_BASE_URL = previousEnv.litellmBaseUrl; process.env.LITELLM_MASTER_KEY = previousEnv.litellmMasterKey; diff --git a/packages/platform-server/__tests__/__e2e__/llmProvisioner.bootstrap.test.ts b/packages/platform-server/__tests__/__e2e__/llmProvisioner.bootstrap.test.ts index e29160bd6..fff0f1f09 100644 --- a/packages/platform-server/__tests__/__e2e__/llmProvisioner.bootstrap.test.ts +++ b/packages/platform-server/__tests__/__e2e__/llmProvisioner.bootstrap.test.ts @@ -14,7 +14,6 @@ const respondJson = (payload: unknown, init?: ResponseInit) => describe('LiteLLMProvisioner bootstrap (DI smoke)', () => { const requiredEnv: Record = { - LLM_PROVIDER: 'litellm', LITELLM_BASE_URL: 'http://127.0.0.1:4000', LITELLM_MASTER_KEY: 'sk-test', AGENTS_DATABASE_URL: 'postgresql://postgres:postgres@localhost:5432/agents_test', diff --git a/packages/platform-server/__tests__/agent.auto-send.test.ts b/packages/platform-server/__tests__/agent.auto-send.test.ts index 7f6fc422f..3d64da672 100644 --- a/packages/platform-server/__tests__/agent.auto-send.test.ts +++ b/packages/platform-server/__tests__/agent.auto-send.test.ts @@ -76,7 +76,6 @@ const createAgentFixture = async () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://user:pass@host/db', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/agent.busy.wait.mode.test.ts b/packages/platform-server/__tests__/agent.busy.wait.mode.test.ts index f5e0bc724..c0e63ebfe 100644 --- a/packages/platform-server/__tests__/agent.busy.wait.mode.test.ts +++ b/packages/platform-server/__tests__/agent.busy.wait.mode.test.ts @@ -40,7 +40,6 @@ describe('Agent busy gating (wait mode)', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 
'postgres://user:pass@host/db', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/agent.error.termination.test.ts b/packages/platform-server/__tests__/agent.error.termination.test.ts index 42545ffc3..cdcc21182 100644 --- a/packages/platform-server/__tests__/agent.error.termination.test.ts +++ b/packages/platform-server/__tests__/agent.error.termination.test.ts @@ -34,7 +34,6 @@ describe('AgentNode error termination handling', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://user:pass@host/db', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/agent.injectAfterTools.test.ts b/packages/platform-server/__tests__/agent.injectAfterTools.test.ts index 5089e700a..bb2425416 100644 --- a/packages/platform-server/__tests__/agent.injectAfterTools.test.ts +++ b/packages/platform-server/__tests__/agent.injectAfterTools.test.ts @@ -115,7 +115,6 @@ const createAgentFixture = async () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://user:pass@host/db', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/agent.node.termination.autosend.test.ts b/packages/platform-server/__tests__/agent.node.termination.autosend.test.ts index f7102f7ed..2ac1b8bd3 100644 --- a/packages/platform-server/__tests__/agent.node.termination.autosend.test.ts +++ b/packages/platform-server/__tests__/agent.node.termination.autosend.test.ts @@ -42,7 +42,6 @@ describe('AgentNode termination auto-send', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'litellm', litellmBaseUrl: 'http://127.0.0.1:4000', litellmMasterKey: 'sk-dev-master-1234', agentsDatabaseUrl: 
'postgres://user:pass@host/db', diff --git a/packages/platform-server/__tests__/agent.terminate.run.test.ts b/packages/platform-server/__tests__/agent.terminate.run.test.ts index 5809904a6..2285de5f3 100644 --- a/packages/platform-server/__tests__/agent.terminate.run.test.ts +++ b/packages/platform-server/__tests__/agent.terminate.run.test.ts @@ -35,7 +35,6 @@ describe('AgentNode termination flow', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://user:pass@host/db', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/agent.thread.model-binding.test.ts b/packages/platform-server/__tests__/agent.thread.model-binding.test.ts index eb60b76c4..3ab466622 100644 --- a/packages/platform-server/__tests__/agent.thread.model-binding.test.ts +++ b/packages/platform-server/__tests__/agent.thread.model-binding.test.ts @@ -30,7 +30,6 @@ class StubProvisioner extends LLMProvisioner { describe('Agent thread model binding', () => { const baseConfig = { - llmProvider: 'openai', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', } as Partial; diff --git a/packages/platform-server/__tests__/app.module.smoke.test.ts b/packages/platform-server/__tests__/app.module.smoke.test.ts index 562f993f4..b1c55b227 100644 --- a/packages/platform-server/__tests__/app.module.smoke.test.ts +++ b/packages/platform-server/__tests__/app.module.smoke.test.ts @@ -22,7 +22,6 @@ import { LiveGraphRuntime } from '../src/graph-core/liveGraph.manager'; import { LLMProvisioner } from '../src/llm/provisioners/llm.provisioner'; import { clearTestConfig, registerTestConfig } from './helpers/config'; -process.env.LLM_PROVIDER = process.env.LLM_PROVIDER || 'litellm'; process.env.LITELLM_BASE_URL = process.env.LITELLM_BASE_URL || 'http://127.0.0.1:4000'; process.env.LITELLM_MASTER_KEY = process.env.LITELLM_MASTER_KEY || 'sk-test-master'; 
process.env.AGENTS_DATABASE_URL = process.env.AGENTS_DATABASE_URL || 'postgresql://postgres:postgres@localhost:5432/agents_test'; @@ -149,7 +148,6 @@ describe('AppModule bootstrap smoke test', () => { } satisfies Partial; const config = registerTestConfig({ - llmProvider: process.env.LLM_PROVIDER === 'openai' ? 'openai' : 'litellm', litellmBaseUrl: process.env.LITELLM_BASE_URL, litellmMasterKey: process.env.LITELLM_MASTER_KEY, agentsDatabaseUrl: process.env.AGENTS_DATABASE_URL, diff --git a/packages/platform-server/__tests__/config.service.fromEnv.test.ts b/packages/platform-server/__tests__/config.service.fromEnv.test.ts index d1f7adbb5..c31e280b4 100644 --- a/packages/platform-server/__tests__/config.service.fromEnv.test.ts +++ b/packages/platform-server/__tests__/config.service.fromEnv.test.ts @@ -3,7 +3,6 @@ import { afterEach, describe, expect, it } from 'vitest'; import { ConfigService } from '../src/core/services/config.service'; const previousEnv: Record = { - llmProvider: process.env.LLM_PROVIDER, litellmBaseUrl: process.env.LITELLM_BASE_URL, litellmMasterKey: process.env.LITELLM_MASTER_KEY, agentsDbUrl: process.env.AGENTS_DATABASE_URL, @@ -11,7 +10,6 @@ const previousEnv: Record = { describe('ConfigService.fromEnv', () => { afterEach(() => { - process.env.LLM_PROVIDER = previousEnv.llmProvider; process.env.LITELLM_BASE_URL = previousEnv.litellmBaseUrl; process.env.LITELLM_MASTER_KEY = previousEnv.litellmMasterKey; process.env.AGENTS_DATABASE_URL = previousEnv.agentsDbUrl; @@ -19,7 +17,6 @@ describe('ConfigService.fromEnv', () => { }); it('parses LiteLLM configuration from process environment', () => { - process.env.LLM_PROVIDER = 'litellm'; process.env.LITELLM_BASE_URL = 'http://127.0.0.1:4000/'; process.env.LITELLM_MASTER_KEY = ' sk-dev-master-1234 '; process.env.AGENTS_DATABASE_URL = 'postgresql://agents:agents@localhost:5443/agents'; diff --git a/packages/platform-server/__tests__/context-items.utils.test.ts 
b/packages/platform-server/__tests__/context-items.utils.test.ts index 4b30692c5..89b57ddf4 100644 --- a/packages/platform-server/__tests__/context-items.utils.test.ts +++ b/packages/platform-server/__tests__/context-items.utils.test.ts @@ -4,7 +4,6 @@ import { ContextItemRole, Prisma } from '@prisma/client'; import { ToolCallOutputMessage } from '@agyn/llm'; import { - ContextItemNullByteGuardError, contextItemInputFromMessage, deepSanitizeCreateData, normalizeContextItem, @@ -14,49 +13,22 @@ import { upsertNormalizedContextItems } from '../src/llm/services/context-items. const NULL_CHAR = String.fromCharCode(0); -function withNullGuardDisabled(fn: () => T | Promise): T | Promise { - const original = process.env.CONTEXT_ITEM_NULL_GUARD; - const legacy = process.env.CONTEXT_ITEM_NUL_GUARD; - delete process.env.CONTEXT_ITEM_NULL_GUARD; - delete process.env.CONTEXT_ITEM_NUL_GUARD; - - const restore = () => { - if (original === undefined) delete process.env.CONTEXT_ITEM_NULL_GUARD; - else process.env.CONTEXT_ITEM_NULL_GUARD = original; - if (legacy === undefined) delete process.env.CONTEXT_ITEM_NUL_GUARD; - else process.env.CONTEXT_ITEM_NUL_GUARD = legacy; - }; - - try { - const result = fn(); - if (result && typeof (result as Promise).then === 'function') { - return (result as Promise).finally(restore); - } - restore(); - return result; - } catch (error) { - restore(); - throw error; - } -} - describe('normalizeContextItem', () => { - it('strips null bytes from content text', () => - withNullGuardDisabled(() => { - const logger = { warn: vi.fn() }; - const result = normalizeContextItem( - { role: ContextItemRole.tool, contentText: `pre${NULL_CHAR}post` }, - logger, - ); + it('strips null bytes from content text', () => { + const logger = { warn: vi.fn() }; + const result = normalizeContextItem( + { role: ContextItemRole.tool, contentText: `pre${NULL_CHAR}post` }, + logger, + ); - expect(result).not.toBeNull(); - expect(result?.contentText).toBe('prepost'); - 
expect(result?.sizeBytes).toBe(Buffer.byteLength('prepost', 'utf8')); - expect(logger.warn).toHaveBeenCalledWith( - 'context_items.null_bytes_stripped', - expect.objectContaining({ removedLength: 1, field: 'contentText' }), - ); - })); + expect(result).not.toBeNull(); + expect(result?.contentText).toBe('prepost'); + expect(result?.sizeBytes).toBe(Buffer.byteLength('prepost', 'utf8')); + expect(logger.warn).toHaveBeenCalledWith( + 'context_items.null_bytes_stripped', + expect.objectContaining({ removedLength: 1, field: 'contentText' }), + ); + }); it('leaves clean text untouched and avoids warning', () => { const logger = { warn: vi.fn() }; @@ -71,159 +43,132 @@ describe('normalizeContextItem', () => { expect(logger.warn).not.toHaveBeenCalled(); }); - it('strips null bytes from metadata items field', () => - withNullGuardDisabled(() => { - const logger = { warn: vi.fn() }; - const result = normalizeContextItem( - { - role: ContextItemRole.assistant, - contentText: 'clean', - metadata: { - items: [`pre${NULL_CHAR}post`], - }, + it('strips null bytes from metadata items field', () => { + const logger = { warn: vi.fn() }; + const result = normalizeContextItem( + { + role: ContextItemRole.assistant, + contentText: 'clean', + metadata: { + items: [`pre${NULL_CHAR}post`], }, - logger, - ); + }, + logger, + ); - expect(result).not.toBeNull(); - expect(JSON.parse(JSON.stringify(result?.metadata))).toEqual({ items: ['prepost'] }); - expect(logger.warn).toHaveBeenCalledWith( - 'context_items.null_bytes_stripped', - expect.objectContaining({ removedLength: 1, path: 'metadata.items.0', field: 'metadata' }), - ); - })); + expect(result).not.toBeNull(); + expect(JSON.parse(JSON.stringify(result?.metadata))).toEqual({ items: ['prepost'] }); + expect(logger.warn).toHaveBeenCalledWith( + 'context_items.null_bytes_stripped', + expect.objectContaining({ removedLength: 1, path: 'metadata.items.0', field: 'metadata' }), + ); + }); - it('strips null bytes embedded in contentJson 
payloads', () => - withNullGuardDisabled(() => { - const logger = { warn: vi.fn() }; - const result = normalizeContextItem( - { - role: ContextItemRole.assistant, - contentJson: { - items: [ - { - type: 'input_text', - text: `hello${NULL_CHAR}world`, - }, - ], - }, + it('strips null bytes embedded in contentJson payloads', () => { + const logger = { warn: vi.fn() }; + const result = normalizeContextItem( + { + role: ContextItemRole.assistant, + contentJson: { + items: [ + { + type: 'input_text', + text: `hello${NULL_CHAR}world`, + }, + ], }, - logger, - ); + }, + logger, + ); - expect(result).not.toBeNull(); - expect(JSON.parse(JSON.stringify(result?.contentJson))).toEqual({ - items: [ - { - type: 'input_text', - text: 'helloworld', - }, - ], - }); - expect(logger.warn).toHaveBeenCalledWith( - 'context_items.null_bytes_stripped', - expect.objectContaining({ removedLength: 1, path: 'contentJson.items.0.text', field: 'contentJson' }), - ); - })); + expect(result).not.toBeNull(); + expect(JSON.parse(JSON.stringify(result?.contentJson))).toEqual({ + items: [ + { + type: 'input_text', + text: 'helloworld', + }, + ], + }); + expect(logger.warn).toHaveBeenCalledWith( + 'context_items.null_bytes_stripped', + expect.objectContaining({ removedLength: 1, path: 'contentJson.items.0.text', field: 'contentJson' }), + ); + }); }); describe('upsertNormalizedContextItems', () => { - it('persists sanitized metadata without null bytes', () => - withNullGuardDisabled(async () => { - const logger = { warn: vi.fn() }; - const normalized = normalizeContextItem( - { - role: ContextItemRole.assistant, - contentText: 'ack', - metadata: { - items: [`abc${NULL_CHAR}def`], - }, + it('persists sanitized metadata without null bytes', async () => { + const logger = { warn: vi.fn() }; + const normalized = normalizeContextItem( + { + role: ContextItemRole.assistant, + contentText: 'ack', + metadata: { + items: [`abc${NULL_CHAR}def`], }, - logger, - ); + }, + logger, + ); - 
expect(normalized).not.toBeNull(); + expect(normalized).not.toBeNull(); - const create = vi.fn(async (args: unknown) => { - const payload = args as { data: { metadata: unknown } }; - expect(JSON.parse(JSON.stringify(payload.data.metadata))).toEqual({ items: ['abcdef'] }); - return { id: 'ctx-1' }; - }); + const create = vi.fn(async (args: unknown) => { + const payload = args as { data: { metadata: unknown } }; + expect(JSON.parse(JSON.stringify(payload.data.metadata))).toEqual({ items: ['abcdef'] }); + return { id: 'ctx-1' }; + }); - const fakeClient = { contextItem: { create } } as unknown; + const fakeClient = { contextItem: { create } } as unknown; - const result = await upsertNormalizedContextItems(fakeClient as never, [normalized!], logger); - expect(result).toEqual({ ids: ['ctx-1'], created: 1 }); - expect(create).toHaveBeenCalledTimes(1); - })); + const result = await upsertNormalizedContextItems(fakeClient as never, [normalized!], logger); + expect(result).toEqual({ ids: ['ctx-1'], created: 1 }); + expect(create).toHaveBeenCalledTimes(1); + }); }); describe('sanitizeContextItemPayload', () => { - it('strips null bytes from nested payloads and remains JSON stringifiable', () => - withNullGuardDisabled(() => { - const logger = { warn: vi.fn() }; - const payload = { - contentText: `hello${NULL_CHAR}world`, - contentJson: { - raw_preview: `preview${NULL_CHAR}value`, - blocks: [ - { - kind: 'text', - text: `block${NULL_CHAR}text`, - children: [{ note: `child${NULL_CHAR}note` }], - }, - ], - }, - metadata: { - debugLabel: `label${NULL_CHAR}value`, - nested: [{ tag: `inner${NULL_CHAR}tag` }], - }, - extra: [{ misc: `array${NULL_CHAR}entry` }], - }; - - const sanitized = sanitizeContextItemPayload(payload, logger); - - expect(sanitized).not.toBe(payload); - expect(sanitized.contentText).toBe('helloworld'); - expect(JSON.parse(JSON.stringify(sanitized.contentJson))).toEqual({ - raw_preview: 'previewvalue', + it('strips null bytes from nested payloads and remains 
JSON stringifiable', () => { + const logger = { warn: vi.fn() }; + const payload = { + contentText: `hello${NULL_CHAR}world`, + contentJson: { + raw_preview: `preview${NULL_CHAR}value`, blocks: [ { kind: 'text', - text: 'blocktext', - children: [{ note: 'childnote' }], + text: `block${NULL_CHAR}text`, + children: [{ note: `child${NULL_CHAR}note` }], }, ], - }); - expect(JSON.parse(JSON.stringify(sanitized.metadata))).toEqual({ - debugLabel: 'labelvalue', - nested: [{ tag: 'innertag' }], - }); - expect(() => JSON.stringify(sanitized)).not.toThrow(); - expect(logger.warn).toHaveBeenCalled(); - })); + }, + metadata: { + debugLabel: `label${NULL_CHAR}value`, + nested: [{ tag: `inner${NULL_CHAR}tag` }], + }, + extra: [{ misc: `array${NULL_CHAR}entry` }], + }; - it('throws when guard flag is enabled and null bytes are present', () => { - expect(() => - sanitizeContextItemPayload( - { contentText: `guard${NULL_CHAR}trip` }, - undefined, - { guard: true }, - ), - ).toThrow(ContextItemNullByteGuardError); - }); + const sanitized = sanitizeContextItemPayload(payload, logger); - it('honors CONTEXT_ITEM_NULL_GUARD environment flag', () => { - const original = process.env.CONTEXT_ITEM_NULL_GUARD; - process.env.CONTEXT_ITEM_NULL_GUARD = '1'; - try { - expect(() => sanitizeContextItemPayload({ contentText: `env${NULL_CHAR}trip` })).toThrow( - ContextItemNullByteGuardError, - ); - } finally { - if (original === undefined) delete process.env.CONTEXT_ITEM_NULL_GUARD; - else process.env.CONTEXT_ITEM_NULL_GUARD = original; - } + expect(sanitized).not.toBe(payload); + expect(sanitized.contentText).toBe('helloworld'); + expect(JSON.parse(JSON.stringify(sanitized.contentJson))).toEqual({ + raw_preview: 'previewvalue', + blocks: [ + { + kind: 'text', + text: 'blocktext', + children: [{ note: 'childnote' }], + }, + ], + }); + expect(JSON.parse(JSON.stringify(sanitized.metadata))).toEqual({ + debugLabel: 'labelvalue', + nested: [{ tag: 'innertag' }], + }); + expect(() => 
JSON.stringify(sanitized)).not.toThrow(); + expect(logger.warn).toHaveBeenCalled(); }); }); @@ -269,33 +214,32 @@ describe('contextItemInputFromMessage', () => { }); describe('deepSanitizeCreateData', () => { - it('strips null bytes across all create payload fields', () => - withNullGuardDisabled(() => { - const logger = { warn: vi.fn() }; - const payload = { - role: ContextItemRole.assistant, - contentText: `text${NULL_CHAR}suffix`, - contentJson: { foo: `bar${NULL_CHAR}baz`, nested: [{ value: `arr${NULL_CHAR}entry` }] } as Prisma.InputJsonValue, - metadata: { info: `meta${NULL_CHAR}data` } as Prisma.InputJsonValue, - sizeBytes: 42, - } satisfies Prisma.ContextItemCreateInput; - - const sanitized = deepSanitizeCreateData(payload, logger); - - expect(sanitized.contentText).toBe('textsuffix'); - expect(JSON.parse(JSON.stringify(sanitized.contentJson))).toEqual({ foo: 'barbaz', nested: [{ value: 'arrentry' }] }); - expect(JSON.parse(JSON.stringify(sanitized.metadata))).toEqual({ info: 'metadata' }); - expect(logger.warn).toHaveBeenCalledWith( - 'context_items.null_bytes_stripped', - expect.objectContaining({ field: 'contentText', path: 'contentText', removedLength: 1 }), - ); - expect(logger.warn).toHaveBeenCalledWith( - 'context_items.null_bytes_stripped', - expect.objectContaining({ field: 'contentJson', path: 'contentJson.foo', removedLength: 1 }), - ); - expect(logger.warn).toHaveBeenCalledWith( - 'context_items.null_bytes_stripped', - expect.objectContaining({ field: 'metadata', path: 'metadata.info', removedLength: 1 }), - ); - })); + it('strips null bytes across all create payload fields', () => { + const logger = { warn: vi.fn() }; + const payload = { + role: ContextItemRole.assistant, + contentText: `text${NULL_CHAR}suffix`, + contentJson: { foo: `bar${NULL_CHAR}baz`, nested: [{ value: `arr${NULL_CHAR}entry` }] } as Prisma.InputJsonValue, + metadata: { info: `meta${NULL_CHAR}data` } as Prisma.InputJsonValue, + sizeBytes: 42, + } satisfies 
Prisma.ContextItemCreateInput; + + const sanitized = deepSanitizeCreateData(payload, logger); + + expect(sanitized.contentText).toBe('textsuffix'); + expect(JSON.parse(JSON.stringify(sanitized.contentJson))).toEqual({ foo: 'barbaz', nested: [{ value: 'arrentry' }] }); + expect(JSON.parse(JSON.stringify(sanitized.metadata))).toEqual({ info: 'metadata' }); + expect(logger.warn).toHaveBeenCalledWith( + 'context_items.null_bytes_stripped', + expect.objectContaining({ field: 'contentText', path: 'contentText', removedLength: 1 }), + ); + expect(logger.warn).toHaveBeenCalledWith( + 'context_items.null_bytes_stripped', + expect.objectContaining({ field: 'contentJson', path: 'contentJson.foo', removedLength: 1 }), + ); + expect(logger.warn).toHaveBeenCalledWith( + 'context_items.null_bytes_stripped', + expect.objectContaining({ field: 'metadata', path: 'metadata.info', removedLength: 1 }), + ); + }); }); diff --git a/packages/platform-server/__tests__/graph.mcp.integration.test.ts b/packages/platform-server/__tests__/graph.mcp.integration.test.ts index 4b8a2df83..a27ed0134 100644 --- a/packages/platform-server/__tests__/graph.mcp.integration.test.ts +++ b/packages/platform-server/__tests__/graph.mcp.integration.test.ts @@ -121,11 +121,8 @@ class StubConfigService extends ConfigService { githubAppId: 'test', githubAppPrivateKey: 'test', githubInstallationId: 'test', - openaiApiKey: 'test', - llmProvider: 'openai', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', - openaiBaseUrl: undefined, githubToken: 'test', graphRepoPath: './data/graph', graphBranch: 'graph-state', diff --git a/packages/platform-server/__tests__/graph.module.di.smoke.test.ts b/packages/platform-server/__tests__/graph.module.di.smoke.test.ts index 3f14472a3..9368306de 100644 --- a/packages/platform-server/__tests__/graph.module.di.smoke.test.ts +++ b/packages/platform-server/__tests__/graph.module.di.smoke.test.ts @@ -27,7 +27,6 @@ import { GraphSocketGateway } from 
'../src/gateway/graph.socket.gateway'; import { GatewayModule } from '../src/gateway/gateway.module'; import { LiveGraphRuntime } from '../src/graph-core/liveGraph.manager'; -process.env.LLM_PROVIDER = 'openai'; process.env.AGENTS_DATABASE_URL = process.env.AGENTS_DATABASE_URL || 'postgres://localhost:5432/test'; process.env.NCPS_ENABLED = process.env.NCPS_ENABLED || 'false'; process.env.CONTAINERS_CLEANUP_ENABLED = process.env.CONTAINERS_CLEANUP_ENABLED || 'false'; @@ -169,7 +168,6 @@ if (!shouldRunDbTests) { const configServiceStub = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost:5432/test', }), ); diff --git a/packages/platform-server/__tests__/helpers/config.ts b/packages/platform-server/__tests__/helpers/config.ts index 5e3b394d6..38321d903 100644 --- a/packages/platform-server/__tests__/helpers/config.ts +++ b/packages/platform-server/__tests__/helpers/config.ts @@ -1,7 +1,6 @@ import { ConfigService, configSchema } from '../../src/core/services/config.service'; const defaultConfigInput = { - llmProvider: 'litellm', litellmBaseUrl: 'http://127.0.0.1:4000', litellmMasterKey: 'sk-test-master', agentsDatabaseUrl: 'postgresql://postgres:postgres@localhost:5432/agents_test', diff --git a/packages/platform-server/__tests__/litellm.admin.integration.test.ts b/packages/platform-server/__tests__/litellm.admin.integration.test.ts index 88f6b3306..002e8b915 100644 --- a/packages/platform-server/__tests__/litellm.admin.integration.test.ts +++ b/packages/platform-server/__tests__/litellm.admin.integration.test.ts @@ -10,7 +10,6 @@ const MASTER_KEY = 'sk-dev-master-1234'; const createConfig = () => new ConfigService().init( configSchema.parse({ - llmProvider: 'litellm', litellmBaseUrl: LITELLM_BASE, litellmMasterKey: MASTER_KEY, agentsDatabaseUrl: 'postgres://dev:dev@localhost:5432/agents', diff --git a/packages/platform-server/__tests__/litellm.provision.test.ts 
b/packages/platform-server/__tests__/litellm.provision.test.ts index 50bce018f..9aed42306 100644 --- a/packages/platform-server/__tests__/litellm.provision.test.ts +++ b/packages/platform-server/__tests__/litellm.provision.test.ts @@ -40,7 +40,6 @@ const respondStatus = (status: number, body = '') => new Response(body, { status const baseConfig = (): ConfigService => { const params: Partial = { - llmProvider: 'litellm', litellmBaseUrl: 'http://litellm.local:4000', litellmMasterKey: 'sk-master', agentsDatabaseUrl: 'postgres://dev:dev@localhost:5432/agents', @@ -57,15 +56,10 @@ const getUrl = (input: RequestInfo | URL): string => { describe('LiteLLMProvisioner', () => { beforeEach(() => { vi.useRealTimers(); - process.env.LITELLM_KEY_ALIAS = 'agents/unit-test'; - }); - - afterEach(() => { - delete process.env.LITELLM_KEY_ALIAS; }); it('revokes persisted key on startup and stores newly issued key', async () => { - const store = new InMemoryKeyStore({ alias: 'agents/unit-test', key: 'sk-old', expiresAt: null }); + const store = new InMemoryKeyStore({ alias: 'agyn_key', key: 'sk-old', expiresAt: null }); const expires = new Date(Date.now() + 30 * 60 * 1000).toISOString(); const fetchMock = vi.fn(async (input: RequestInfo | URL) => { const url = getUrl(input); diff --git a/packages/platform-server/__tests__/live.graph.runtime.simpleAgent.config.propagation.test.ts b/packages/platform-server/__tests__/live.graph.runtime.simpleAgent.config.propagation.test.ts index 4f5a210f0..98274f510 100644 --- a/packages/platform-server/__tests__/live.graph.runtime.simpleAgent.config.propagation.test.ts +++ b/packages/platform-server/__tests__/live.graph.runtime.simpleAgent.config.propagation.test.ts @@ -39,7 +39,6 @@ describe('LiveGraphRuntime -> Agent config propagation', () => { githubAppId: 'test', githubAppPrivateKey: 'test', githubInstallationId: 'test', - openaiApiKey: 'test', githubToken: 'test', graphRepoPath: './data/graph', graphBranch: 'graph-state', @@ -71,7 +70,6 @@ 
describe('LiveGraphRuntime -> Agent config propagation', () => { ncpsAuthHeader: undefined, ncpsAuthToken: undefined, agentsDatabaseUrl: 'postgres://localhost:5432/test', - llmProvider: 'openai', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', corsOrigins: [], diff --git a/packages/platform-server/__tests__/llm.settings.bootstrap.test.ts b/packages/platform-server/__tests__/llm.settings.bootstrap.test.ts index e9e90a721..a8c0cae82 100644 --- a/packages/platform-server/__tests__/llm.settings.bootstrap.test.ts +++ b/packages/platform-server/__tests__/llm.settings.bootstrap.test.ts @@ -17,7 +17,6 @@ describe('LLMSettingsModule bootstrap', () => { const config = ConfigService.register( new ConfigService().init( configSchema.parse({ - llmProvider: 'litellm', litellmBaseUrl: 'http://127.0.0.1:4000', litellmMasterKey: 'sk-test-master', agentsDatabaseUrl: 'postgres://postgres:postgres@localhost:5432/test', diff --git a/packages/platform-server/__tests__/llm.settings.service.test.ts b/packages/platform-server/__tests__/llm.settings.service.test.ts index 3f0d13b74..52037e3a1 100644 --- a/packages/platform-server/__tests__/llm.settings.service.test.ts +++ b/packages/platform-server/__tests__/llm.settings.service.test.ts @@ -8,7 +8,6 @@ import { ConfigService, configSchema, type Config } from '../src/core/services/c const BASE_URL = 'http://litellm.test'; const defaultConfig: Partial = { - llmProvider: 'litellm', litellmBaseUrl: BASE_URL, litellmMasterKey: 'sk-master', agentsDatabaseUrl: 'postgres://dev:dev@localhost:5432/agents', @@ -742,24 +741,6 @@ describe.sequential('LLMSettingsService', () => { }); }); - it('reports provider mismatch when LiteLLM mode is disabled', async () => { - const config = { - llmProvider: 'openai', - litellmBaseUrl: BASE_URL, - litellmMasterKey: 'sk-master', - isInitialized: () => true, - } as unknown as ConfigService; - const service = new LLMSettingsService(config); - const status = await service.getAdminStatus(); - 
expect(status).toMatchObject({ - configured: false, - baseUrl: BASE_URL, - hasMasterKey: true, - provider: 'openai', - reason: 'provider_mismatch', - }); - }); - it('confirms admin reachability when LiteLLM responds', async () => { const scope = nock(BASE_URL) .get('/public/providers/fields') diff --git a/packages/platform-server/__tests__/manage.tool.test.ts b/packages/platform-server/__tests__/manage.tool.test.ts index ed72ad06e..24817623d 100644 --- a/packages/platform-server/__tests__/manage.tool.test.ts +++ b/packages/platform-server/__tests__/manage.tool.test.ts @@ -72,7 +72,6 @@ async function createHarness(options: { persistence?: AgentsPersistenceService } provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', @@ -367,7 +366,6 @@ describe('ManageTool unit', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', @@ -430,7 +428,6 @@ describe('ManageTool graph wiring', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/mcp.enabledTools.boot.integration.test.ts b/packages/platform-server/__tests__/mcp.enabledTools.boot.integration.test.ts index 7a9847c78..02b9fdd98 100644 --- a/packages/platform-server/__tests__/mcp.enabledTools.boot.integration.test.ts +++ b/packages/platform-server/__tests__/mcp.enabledTools.boot.integration.test.ts @@ -74,11 +74,8 @@ class StubConfigService extends ConfigService { githubAppId: 'test', githubAppPrivateKey: 'test', githubInstallationId: 'test', - 
openaiApiKey: 'test', - llmProvider: 'openai', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', - openaiBaseUrl: undefined, githubToken: 'test', graphRepoPath: './data/graph', graphBranch: 'graph-state', diff --git a/packages/platform-server/__tests__/mixed.shell.mcp.isolation.test.ts b/packages/platform-server/__tests__/mixed.shell.mcp.isolation.test.ts index 014a47d18..2f071d586 100644 --- a/packages/platform-server/__tests__/mixed.shell.mcp.isolation.test.ts +++ b/packages/platform-server/__tests__/mixed.shell.mcp.isolation.test.ts @@ -13,7 +13,6 @@ describe('Mixed Shell + MCP overlay isolation', () => { const workspaceNode = new WorkspaceNodeStub(provider); const cfg = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/ncpsKey.service.test.ts b/packages/platform-server/__tests__/ncpsKey.service.test.ts index b8a4798b9..d7a2a7cb9 100644 --- a/packages/platform-server/__tests__/ncpsKey.service.test.ts +++ b/packages/platform-server/__tests__/ncpsKey.service.test.ts @@ -4,8 +4,8 @@ import { NcpsKeyService } from '../src/infra/ncps/ncpsKey.service'; describe('NcpsKeyService', () => { const baseEnv = { - githubAppId: 'x', githubAppPrivateKey: 'x', githubInstallationId: 'x', openaiApiKey: 'x', githubToken: 'x', - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost/agents', + githubAppId: 'x', githubAppPrivateKey: 'x', githubInstallationId: 'x', githubToken: 'x', + agentsDatabaseUrl: 'postgres://localhost/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', graphRepoPath: './data/graph', graphBranch: 'graph-state', dockerMirrorUrl: 'http://registry-mirror:5000', nixAllowedChannels: 'nixpkgs-unstable', nixHttpTimeoutMs: '5000', nixCacheTtlMs: String(300000), nixCacheMax: '500', diff --git 
a/packages/platform-server/__tests__/nix.controller.test.ts b/packages/platform-server/__tests__/nix.controller.test.ts index 42cfe9444..c7564defc 100644 --- a/packages/platform-server/__tests__/nix.controller.test.ts +++ b/packages/platform-server/__tests__/nix.controller.test.ts @@ -60,15 +60,14 @@ describe.sequential('NixController', () => { beforeEach(() => { const cfg = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', githubAppId: 'x', - githubAppPrivateKey: 'x', - githubInstallationId: 'x', - githubToken: 'x', - agentsDatabaseUrl: 'postgres://localhost:5432/agents', - litellmBaseUrl: 'http://localhost:4000', - litellmMasterKey: 'sk-test', - graphRepoPath: './data/graph', + githubAppPrivateKey: 'x', + githubInstallationId: 'x', + githubToken: 'x', + agentsDatabaseUrl: 'postgres://localhost:5432/agents', + litellmBaseUrl: 'http://localhost:4000', + litellmMasterKey: 'sk-test', + graphRepoPath: './data/graph', graphBranch: 'graph-state', dockerMirrorUrl: 'http://registry-mirror:5000', nixAllowedChannels: 'nixpkgs-unstable', diff --git a/packages/platform-server/__tests__/nix.e2e.test.ts b/packages/platform-server/__tests__/nix.e2e.test.ts index b7238df22..dfa8b2286 100644 --- a/packages/platform-server/__tests__/nix.e2e.test.ts +++ b/packages/platform-server/__tests__/nix.e2e.test.ts @@ -13,7 +13,6 @@ describe('NixController E2E (Fastify)', () => { beforeAll(async () => { const cfg = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', githubAppId: 'x', githubAppPrivateKey: 'x', githubInstallationId: 'x', githubToken: 'x', agentsDatabaseUrl: 'postgres://localhost:5432/agents', litellmBaseUrl: 'http://localhost:4000', diff --git a/packages/platform-server/__tests__/nixRepo.controller.test.ts b/packages/platform-server/__tests__/nixRepo.controller.test.ts index 906d91c9c..f020f4ba7 100644 --- a/packages/platform-server/__tests__/nixRepo.controller.test.ts +++ b/packages/platform-server/__tests__/nixRepo.controller.test.ts 
@@ -1,12 +1,10 @@ -import nock from 'nock'; import { beforeEach, afterEach, describe, it, expect, vi } from 'vitest'; import type { FastifyReply } from 'fastify'; +import { Response } from 'node-fetch-native'; import { NixRepoController } from '../src/infra/ncps/nixRepo.controller'; import { ConfigService, configSchema } from '../src/core/services/config.service'; -const API_BASE = 'https://api.github.com'; - const createReply = (): FastifyReply => { const reply = {} as FastifyReply; Object.assign(reply, { @@ -21,15 +19,11 @@ const codeCalls = (reply: FastifyReply) => ((reply.code as unknown as { mock?: { describe('NixRepoController', () => { let controller: NixRepoController; let reply: FastifyReply; + let execGitMock: ReturnType; beforeEach(() => { const cfg = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', - githubAppId: 'app', - githubAppPrivateKey: 'key', - githubInstallationId: 'inst', - githubToken: 'token', agentsDatabaseUrl: 'postgres://localhost:5432/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', @@ -40,24 +34,16 @@ describe('NixRepoController', () => { ); controller = new NixRepoController(cfg); reply = createReply(); + execGitMock = vi.spyOn(controller as unknown as { execGit: (...args: string[]) => Promise }, 'execGit'); }); afterEach(() => { - nock.cleanAll(); vi.clearAllMocks(); }); it('resolves repository, default branch, and returns canonical payload', async () => { - const repoScope = nock(API_BASE) - .get('/repos/Owner/Repo') - .reply(200, { full_name: 'Owner/Repo', default_branch: 'main' }); - const commitScope = nock(API_BASE) - .get('/repos/Owner/Repo/commits/main') - .reply(200, { sha: 'ABCDEF1234567890ABCDEF1234567890ABCDEF12' }); - const flakeScope = nock(API_BASE) - .get('/repos/Owner/Repo/contents/flake.nix') - .query((q) => q.ref === 'abcdef1234567890abcdef1234567890abcdef12') - .reply(200, 'flake contents'); + mockGitSuccess(execGitMock, 'https://github.com/Owner/Repo.git', 'main', 
'ABCDEF1234567890ABCDEF1234567890ABCDEF12'); + controller.setFetchImpl(async () => new Response('flake contents', { status: 200 })); const body = await controller.resolveRepo( { repository: 'Owner/Repo', attr: 'packages.x86_64-linux.hello' }, @@ -73,22 +59,13 @@ describe('NixRepoController', () => { flakeUri: 'github:Owner/Repo/abcdef1234567890abcdef1234567890abcdef12#packages.x86_64-linux.hello', attrCheck: 'skipped', }); - repoScope.done(); - commitScope.done(); - flakeScope.done(); }); it('normalizes https repository input and trims ref', async () => { - const repoScope = nock(API_BASE) - .get('/repos/owner/example') - .reply(200, { full_name: 'owner/example', default_branch: 'default' }); - const commitScope = nock(API_BASE) - .get('/repos/owner/example/commits/v1.2.3') - .reply(200, { sha: '1234567890abcdef1234567890abcdef12345678' }); - const flakeScope = nock(API_BASE) - .get('/repos/owner/example/contents/flake.nix') - .query((q) => q.ref === '1234567890abcdef1234567890abcdef12345678') - .reply(200, 'flake'); + mockGitSuccess(execGitMock, 'https://github.com/owner/example.git', 'default', '1234567890ABCDEF1234567890ABCDEF12345678', { + explicitRef: 'v1.2.3', + }); + controller.setFetchImpl(async () => new Response('flake', { status: 200 })); const body = await controller.resolveRepo( { repository: 'https://github.com/owner/example.git', ref: ' v1.2.3 ', attr: 'packages.foo.bar' }, @@ -98,19 +75,11 @@ describe('NixRepoController', () => { expect(codeCalls(reply).at(-1)?.[0]).toBe(200); expect(body.ref).toBe('v1.2.3'); expect(body.repository).toBe('github:owner/example'); - repoScope.done(); - commitScope.done(); - flakeScope.done(); }); it('returns 400 when repository is outside allowlist', async () => { const cfg = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', - githubAppId: 'app', - githubAppPrivateKey: 'key', - githubInstallationId: 'inst', - githubToken: 'token', agentsDatabaseUrl: 'postgres://localhost:5432/agents', 
litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', @@ -132,12 +101,8 @@ describe('NixRepoController', () => { }); it('returns 404 when ref is missing', async () => { - const repoScope = nock(API_BASE) - .get('/repos/owner/example') - .reply(200, { full_name: 'owner/example', default_branch: 'main' }); - const commitScope = nock(API_BASE) - .get('/repos/owner/example/commits/missing') - .reply(404, {}); + mockGitMissingRef(execGitMock, 'https://github.com/owner/example.git', 'main', 'missing'); + controller.setFetchImpl(async () => new Response('flake', { status: 200 })); const body = await controller.resolveRepo( { repository: 'owner/example', ref: 'missing', attr: 'packages.foo' }, @@ -146,21 +111,11 @@ describe('NixRepoController', () => { expect(codeCalls(reply).at(-1)?.[0]).toBe(404); expect(body).toEqual({ error: 'ref_not_found' }); - repoScope.done(); - commitScope.done(); }); it('returns 409 when flake.nix is absent', async () => { - const repoScope = nock(API_BASE) - .get('/repos/owner/flake-less') - .reply(200, { full_name: 'owner/flake-less', default_branch: 'main' }); - const commitScope = nock(API_BASE) - .get('/repos/owner/flake-less/commits/main') - .reply(200, { sha: 'abcdefabcdefabcdefabcdefabcdefabcdefabcd' }); - const flakeScope = nock(API_BASE) - .get('/repos/owner/flake-less/contents/flake.nix') - .query((q) => q.ref === 'abcdefabcdefabcdefabcdefabcdefabcdefabcd') - .reply(404, {}); + mockGitSuccess(execGitMock, 'https://github.com/owner/flake-less.git', 'main', 'abcdefabcdefabcdefabcdefabcdefabcdefabcd'); + controller.setFetchImpl(async () => new Response('not found', { status: 404 })); const body = await controller.resolveRepo( { repository: 'owner/flake-less', attr: 'packages.x86_64.bar' }, @@ -169,15 +124,11 @@ describe('NixRepoController', () => { expect(codeCalls(reply).at(-1)?.[0]).toBe(409); expect(body).toEqual({ error: 'non_flake_repo' }); - repoScope.done(); - commitScope.done(); - flakeScope.done(); }); it('maps 
GitHub authentication failures to 401 unauthorized_private_repo', async () => { - const repoScope = nock(API_BASE) - .get('/repos/owner/private') - .reply(403, { message: 'Requires authentication' }); + mockGitSuccess(execGitMock, 'https://github.com/owner/private.git', 'main', 'abcdefabcdefabcdefabcdefabcdefabcdefabcd'); + controller.setFetchImpl(async () => new Response('unauthorized', { status: 403 })); const body = await controller.resolveRepo( { repository: 'owner/private', attr: 'packages.foo' }, @@ -186,6 +137,69 @@ describe('NixRepoController', () => { expect(codeCalls(reply).at(-1)?.[0]).toBe(401); expect(body).toEqual({ error: 'unauthorized_private_repo' }); - repoScope.done(); }); }); + +function mockGitSuccess( + mock: ReturnType, + remote: string, + defaultBranch: string, + commitSha: string, + options: { explicitRef?: string } = {}, +): void { + const normalizedSha = commitSha.toLowerCase(); + const refToResolve = options.explicitRef?.trim() ?? defaultBranch; + mock.mockImplementation(async (args: string[]) => { + if (args[0] !== 'ls-remote') { + throw new Error(`unexpected git command ${args.join(' ')}`); + } + if (args[1] === '--symref') { + const [, , targetRemote, target] = args; + if (targetRemote !== remote || target !== 'HEAD') { + throw new Error('unexpected symref invocation'); + } + return `ref: refs/heads/${defaultBranch}\tHEAD\n${normalizedSha}\tHEAD\n`; + } + const targetRemote = args[1]; + const pattern = args[2]; + if (targetRemote !== remote) { + throw new Error('unexpected remote'); + } + if (pattern === refToResolve || pattern === `refs/heads/${refToResolve}`) { + return `${normalizedSha}\trefs/heads/${refToResolve}`; + } + if (pattern === `refs/tags/${refToResolve}` || pattern === `refs/tags/${refToResolve}^{}`) { + return ''; + } + return ''; + }); +} + +function mockGitMissingRef( + mock: ReturnType, + remote: string, + defaultBranch: string, + missingRef: string, +): void { + mock.mockImplementation(async (args: string[]) => { + 
if (args[0] !== 'ls-remote') { + throw new Error('unexpected command'); + } + if (args[1] === '--symref') { + const [, , targetRemote, target] = args; + if (targetRemote !== remote || target !== 'HEAD') { + throw new Error('unexpected symref invocation'); + } + return `ref: refs/heads/${defaultBranch}\tHEAD\n${defaultBranch}\tHEAD\n`; + } + const targetRemote = args[1]; + if (targetRemote !== remote) { + throw new Error('unexpected remote'); + } + const pattern = args[2]; + if (pattern.includes(missingRef)) { + return ''; + } + return ''; + }); +} diff --git a/packages/platform-server/__tests__/nodes.module.di.smoke.test.ts b/packages/platform-server/__tests__/nodes.module.di.smoke.test.ts index 49bace940..d95d1074a 100644 --- a/packages/platform-server/__tests__/nodes.module.di.smoke.test.ts +++ b/packages/platform-server/__tests__/nodes.module.di.smoke.test.ts @@ -7,7 +7,6 @@ import { SlackAdapter } from '../src/messaging/slack/slack.adapter'; import { EventsBusService } from '../src/events/events-bus.service'; import { createReferenceResolverStub } from './helpers/reference-resolver.stub'; -process.env.LLM_PROVIDER = process.env.LLM_PROVIDER || 'litellm'; process.env.AGENTS_DATABASE_URL = process.env.AGENTS_DATABASE_URL || 'postgres://localhost:5432/test'; const shouldRunDbTests = process.env.RUN_DB_TESTS === 'true'; diff --git a/packages/platform-server/__tests__/openai.provisioner.di.test.ts b/packages/platform-server/__tests__/openai.provisioner.di.test.ts deleted file mode 100644 index 8cf84468e..000000000 --- a/packages/platform-server/__tests__/openai.provisioner.di.test.ts +++ /dev/null @@ -1,24 +0,0 @@ -import { describe, it, expect } from 'vitest'; - -import { OpenAILLMProvisioner } from '../src/llm/provisioners/openai.provisioner'; -import { ConfigService, configSchema, type Config } from '../src/core/services/config.service'; - -const baseConfig: Partial = { - llmProvider: 'openai', - openaiApiKey: 'sk-test', - litellmBaseUrl: 'http://127.0.0.1:4000', 
- litellmMasterKey: 'sk-master', - agentsDatabaseUrl: 'postgres://dev:dev@localhost:5432/agents', -}; - -describe('OpenAILLMProvisioner DI enforcement', () => { - it('throws when ConfigService is not initialized through Nest', () => { - const cfg = new ConfigService(); - expect(() => new OpenAILLMProvisioner(cfg)).toThrow(/ConfigService injected before initialization/); - }); - - it('can be constructed when ConfigService is initialized', () => { - const cfg = new ConfigService().init(configSchema.parse(baseConfig)); - expect(() => new OpenAILLMProvisioner(cfg)).not.toThrow(); - }); -}); diff --git a/packages/platform-server/__tests__/simpleAgent.summarization.graph.test.ts b/packages/platform-server/__tests__/simpleAgent.summarization.graph.test.ts index 6f60f45bc..a069e2db8 100644 --- a/packages/platform-server/__tests__/simpleAgent.summarization.graph.test.ts +++ b/packages/platform-server/__tests__/simpleAgent.summarization.graph.test.ts @@ -22,7 +22,6 @@ describe('Agent summarization graph', () => { provide: ConfigService, useValue: new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: 'postgres://localhost/agents', litellmBaseUrl: 'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/templates.memory.registration.test.ts b/packages/platform-server/__tests__/templates.memory.registration.test.ts index fea4dc340..8c530b612 100644 --- a/packages/platform-server/__tests__/templates.memory.registration.test.ts +++ b/packages/platform-server/__tests__/templates.memory.registration.test.ts @@ -23,7 +23,6 @@ describe('templates: memory registration and agent memory port', () => { maybeIt('registers memory and memoryConnector templates and exposes Agent memory target port', async () => { const configService = new ConfigService().init( configSchema.parse({ - llmProvider: 'openai', agentsDatabaseUrl: process.env.AGENTS_DATABASE_URL || 'postgres://localhost/skip', litellmBaseUrl: 
'http://localhost:4000', litellmMasterKey: 'sk-test', diff --git a/packages/platform-server/__tests__/vitest.setup.ts b/packages/platform-server/__tests__/vitest.setup.ts index 8884ded41..cc9543636 100644 --- a/packages/platform-server/__tests__/vitest.setup.ts +++ b/packages/platform-server/__tests__/vitest.setup.ts @@ -2,4 +2,3 @@ import 'reflect-metadata'; process.env.LITELLM_BASE_URL ||= 'http://127.0.0.1:4000'; process.env.LITELLM_MASTER_KEY ||= 'sk-dev-master-1234'; -process.env.CONTEXT_ITEM_NULL_GUARD ||= '1'; diff --git a/packages/platform-server/src/core/services/config.service.ts b/packages/platform-server/src/core/services/config.service.ts index 903d63d64..598cb7935 100644 --- a/packages/platform-server/src/core/services/config.service.ts +++ b/packages/platform-server/src/core/services/config.service.ts @@ -4,14 +4,6 @@ import { z } from 'zod'; dotenv.config(); export const configSchema = z.object({ - // GitHub settings are optional to allow dev boot without GitHub - githubAppId: z.string().min(1).optional(), - githubAppPrivateKey: z.string().min(1).optional(), - githubInstallationId: z.string().min(1).optional(), - // LLM provider selection: must be explicit; no default - llmProvider: z.enum(['openai', 'litellm']), - openaiApiKey: z.string().optional(), - openaiBaseUrl: z.string().optional(), // LiteLLM admin configuration (required) litellmBaseUrl: z .string() @@ -23,7 +15,11 @@ export const configSchema = z.object({ .string() .min(1, 'LITELLM_MASTER_KEY is required') .transform((value) => value.trim()), - githubToken: z.string().min(1).optional(), + // Optional GitHub credentials (App or PAT) + githubAppId: z.string().optional(), + githubAppPrivateKey: z.string().optional(), + githubInstallationId: z.string().optional(), + githubToken: z.string().optional(), // Graph persistence graphRepoPath: z.string().default('./data/graph'), graphBranch: z.string().default('graph-state'), @@ -228,32 +224,26 @@ export class ConfigService implements Config { 
return this._params !== undefined; } + get litellmBaseUrl(): string { + return this.params.litellmBaseUrl; + } + get litellmMasterKey(): string { + return this.params.litellmMasterKey; + } + + get llmProvider(): 'litellm' { + return 'litellm'; + } + get githubAppId(): string | undefined { return this.params.githubAppId; } - get githubAppPrivateKey(): string | undefined { return this.params.githubAppPrivateKey; } get githubInstallationId(): string | undefined { return this.params.githubInstallationId; } - - get llmProvider(): 'openai' | 'litellm' { - return this.params.llmProvider; - } - get openaiApiKey(): string | undefined { - return this.params.openaiApiKey; - } - get openaiBaseUrl(): string | undefined { - return this.params.openaiBaseUrl; - } - get litellmBaseUrl(): string { - return this.params.litellmBaseUrl; - } - get litellmMasterKey(): string { - return this.params.litellmMasterKey; - } get githubToken(): string | undefined { return this.params.githubToken; } @@ -378,12 +368,11 @@ export class ConfigService implements Config { const urlServer = process.env.NCPS_URL_SERVER || legacy; const urlContainer = process.env.NCPS_URL_CONTAINER || legacy; const parsed = configSchema.parse({ + litellmBaseUrl: process.env.LITELLM_BASE_URL, + litellmMasterKey: process.env.LITELLM_MASTER_KEY, githubAppId: process.env.GITHUB_APP_ID, githubAppPrivateKey: process.env.GITHUB_APP_PRIVATE_KEY, githubInstallationId: process.env.GITHUB_INSTALLATION_ID, - llmProvider: process.env.LLM_PROVIDER, - litellmBaseUrl: process.env.LITELLM_BASE_URL, - litellmMasterKey: process.env.LITELLM_MASTER_KEY, githubToken: process.env.GH_TOKEN, // Pass raw env; schema will validate/assign default graphRepoPath: process.env.GRAPH_REPO_PATH, diff --git a/packages/platform-server/src/index.ts b/packages/platform-server/src/index.ts index 06398476c..8447500b6 100644 --- a/packages/platform-server/src/index.ts +++ b/packages/platform-server/src/index.ts @@ -1,11 +1,5 @@ import 'reflect-metadata'; 
-const envName = (process.env.NODE_ENV ?? '').toLowerCase(); -const isProduction = envName === 'production' || (process.env.AGENTS_ENV ?? '').toLowerCase() === 'production'; -if (!isProduction && process.env.CONTEXT_ITEM_NULL_GUARD === undefined) { - process.env.CONTEXT_ITEM_NULL_GUARD = '1'; -} - import { Logger, ValidationPipe } from '@nestjs/common'; import { NestFactory } from '@nestjs/core'; import { FastifyAdapter } from '@nestjs/platform-fastify'; diff --git a/packages/platform-server/src/infra/ncps/nixRepo.controller.ts b/packages/platform-server/src/infra/ncps/nixRepo.controller.ts index 82e8990d2..c9478e01e 100644 --- a/packages/platform-server/src/infra/ncps/nixRepo.controller.ts +++ b/packages/platform-server/src/infra/ncps/nixRepo.controller.ts @@ -2,6 +2,7 @@ import { Controller, Get, Inject, Query, Res } from '@nestjs/common'; import type { FastifyReply } from 'fastify'; import { z, ZodError } from 'zod'; import { fetch as nodeFetch, Response } from 'node-fetch-native'; +import { execFile } from 'node:child_process'; import { ConfigService } from '../../core/services/config.service'; const ATTRIBUTE_SEGMENT = /^[A-Za-z0-9_.+-]+$/; @@ -9,6 +10,14 @@ const ATTRIBUTE_PATH = new RegExp(`^(?:${ATTRIBUTE_SEGMENT.source})(?:\\.(?:${AT const OWNER_REPO_IDENT = /^[A-Za-z0-9_.-]+$/; type NormalizedRepository = { owner: string; repo: string; input: string }; +type ResolveRepoPayload = { + repository: string; + ref: string; + commitHash: string; + attributePath: string; + flakeUri: string; + attrCheck: 'skipped'; +}; @Controller('api/nix') export class NixRepoController { @@ -21,13 +30,11 @@ export class NixRepoController { .strict(); private readonly timeoutMs: number; - private readonly githubToken?: string; private readonly repoAllowlist: string[]; private fetchImpl: (input: RequestInfo | URL, init?: RequestInit) => Promise; constructor(@Inject(ConfigService) private readonly config: ConfigService) { this.timeoutMs = config.nixHttpTimeoutMs; - 
this.githubToken = config.githubToken; this.repoAllowlist = (config.nixRepoAllowlist ?? []).map((entry) => entry.toLowerCase()); this.fetchImpl = nodeFetch as unknown as typeof fetch; } @@ -57,29 +64,9 @@ export class NixRepoController { return { error: 'repository_not_allowed', repository: normalized.input }; } - const effectiveRef = typeof rawRef === 'string' ? rawRef.trim() : ''; - const ac = new AbortController(); - const timer = setTimeout(() => ac.abort(), this.timeoutMs); - try { - const repoInfo = await this.fetchRepo(normalized, ac.signal); - const defaultBranch = repoInfo.default_branch || 'main'; - const targetRef = effectiveRef.length > 0 ? effectiveRef : defaultBranch; - const commitSha = await this.resolveCommit(normalized, targetRef, ac.signal); - await this.ensureFlakePresent(normalized, commitSha, ac.signal); - - const canonicalRepository = `github:${repoInfo.full_name}`; - reply.code(200); - return { - repository: canonicalRepository, - ref: targetRef, - commitHash: commitSha, - attributePath: attr, - flakeUri: `${canonicalRepository}/${commitSha}#${attr}`, - attrCheck: 'skipped' as const, - }; - } finally { - clearTimeout(timer); - } + const resolution = await this.performRepositoryResolution(normalized, attr, rawRef); + reply.code(200); + return resolution; } catch (err) { if (err instanceof ZodError) { reply.code(400); @@ -95,6 +82,55 @@ export class NixRepoController { } } + private async performRepositoryResolution( + normalized: NormalizedRepository, + attr: string, + rawRef: string | undefined, + ): Promise { + const effectiveRef = typeof rawRef === 'string' ? 
rawRef.trim() : ''; + const ac = new AbortController(); + const timer = setTimeout(() => ac.abort(), this.timeoutMs); + try { + const targetRef = await this.resolveTargetRef(normalized, effectiveRef, ac.signal); + const commitSha = await this.resolveCommitSha(normalized, targetRef, ac.signal); + await this.ensureFlakePresent(normalized, commitSha, ac.signal); + const canonicalRepository = `github:${normalized.owner}/${normalized.repo}`; + return { + repository: canonicalRepository, + ref: targetRef, + commitHash: commitSha, + attributePath: attr, + flakeUri: `${canonicalRepository}/${commitSha}#${attr}`, + attrCheck: 'skipped', + }; + } finally { + clearTimeout(timer); + } + } + + private async resolveTargetRef( + normalized: NormalizedRepository, + effectiveRef: string, + signal: AbortSignal, + ): Promise { + if (effectiveRef) { + return effectiveRef; + } + const defaultBranch = await this.determineDefaultBranch(normalized, signal); + return defaultBranch; + } + + private async resolveCommitSha( + normalized: NormalizedRepository, + targetRef: string, + signal: AbortSignal, + ): Promise { + if (/^[0-9a-f]{40}$/i.test(targetRef)) { + return targetRef.toLowerCase(); + } + return this.resolveGitReference(normalized, targetRef, signal); + } + private normalizeRepository(input: string): NormalizedRepository | null { const trimmed = input.trim(); if (!trimmed) return null; @@ -116,76 +152,165 @@ export class NixRepoController { return { owner, repo, input }; } - private async fetchRepo(repo: NormalizedRepository, signal: AbortSignal): Promise<{ full_name: string; default_branch: string }> { - const path = `/repos/${repo.owner}/${repo.repo}`; - const res = await this.githubRequest(path, signal); - if (res.status === 404) throw new FetchErrorResponse(404, { error: 'repo_not_found' }); - if (!res.ok) { - throw new FetchErrorResponse(this.mapGithubErrorStatus(res.status), { error: 'github_error', status: res.status }); + private async determineDefaultBranch(repo: 
NormalizedRepository, signal: AbortSignal): Promise { + try { + const stdout = await this.execGit(['ls-remote', '--symref', this.buildGitRemote(repo), 'HEAD'], signal); + const lines = stdout.split('\n').map((line) => line.trim()).filter(Boolean); + for (const line of lines) { + if (!line.startsWith('ref:')) continue; + const [refPart, headPart] = line.split('\t'); + if (headPart !== 'HEAD') continue; + const match = /^ref:\s+refs\/heads\/(.+)$/.exec(refPart); + if (match?.[1]) { + return match[1].trim(); + } + } + const headLine = lines.find((line) => /\bHEAD$/.test(line)); + if (headLine) { + const inferred = headLine.split('\t')[1]; + if (inferred) return inferred.trim(); + } + } catch (error) { + if (error instanceof GitCommandError) { + this.handleGitRepositoryError(error); + } + throw error; } - const body = await this.parseGithubJson(res); - const fullName = typeof body?.full_name === 'string' ? body.full_name : `${repo.owner}/${repo.repo}`; - const defaultBranch = typeof body?.default_branch === 'string' && body.default_branch.trim().length > 0 ? body.default_branch : 'main'; - return { full_name: fullName, default_branch: defaultBranch }; + return 'main'; } - private async resolveCommit(repo: NormalizedRepository, ref: string, signal: AbortSignal): Promise { - const path = `/repos/${repo.owner}/${repo.repo}/commits/${encodeURIComponent(ref)}`; - const res = await this.githubRequest(path, signal); - if (res.status === 404 || res.status === 422) { - throw new FetchErrorResponse(404, { error: 'ref_not_found' }); - } - if (!res.ok) { - throw new FetchErrorResponse(this.mapGithubErrorStatus(res.status), { error: 'github_error', status: res.status }); - } - const body = await this.parseGithubJson(res); - const sha = typeof body?.sha === 'string' ? 
body.sha.trim() : ''; - if (!/^[0-9a-fA-F]{40}$/.test(sha)) { - throw new FetchErrorResponse(502, { error: 'invalid_commit_hash' }); + private async resolveGitReference(repo: NormalizedRepository, ref: string, signal: AbortSignal): Promise { + const patterns = this.buildRefPatterns(ref); + for (const pattern of patterns) { + const sha = await this.resolvePatternSha(repo, pattern, signal); + if (sha) { + return sha; + } } - return sha.toLowerCase(); + throw new FetchErrorResponse(404, { error: 'ref_not_found' }); } - private async ensureFlakePresent(repo: NormalizedRepository, commitSha: string, signal: AbortSignal): Promise { - const path = `/repos/${repo.owner}/${repo.repo}/contents/flake.nix?ref=${commitSha}`; - const res = await this.githubRequest(path, signal, true); - if (res.status === 404) throw new FetchErrorResponse(409, { error: 'non_flake_repo' }); - if (!res.ok) { - throw new FetchErrorResponse(this.mapGithubErrorStatus(res.status), { error: 'github_error', status: res.status }); + private async resolvePatternSha( + repo: NormalizedRepository, + pattern: string, + signal: AbortSignal, + ): Promise { + try { + const stdout = await this.execGit(['ls-remote', this.buildGitRemote(repo), pattern], signal); + const lines = stdout.split('\n').map((line) => line.trim()).filter(Boolean); + if (lines.length === 0) { + return null; + } + const parsed = lines + .map((line) => { + const [sha, ref] = line.split(/\s+/); + if (!sha || !ref || !/^[0-9a-fA-F]{40}$/.test(sha)) { + return null; + } + return { sha: sha.toLowerCase(), ref }; + }) + .filter((entry): entry is { sha: string; ref: string } => entry !== null); + + if (parsed.length === 0) { + return null; + } + + const peeled = parsed.find((entry) => entry.ref.endsWith('^{}')); + if (peeled) { + return peeled.sha; + } + + return parsed[0]?.sha ?? 
null; + } catch (error) { + if (error instanceof GitCommandError) { + this.handleGitRepositoryError(error); + } + throw error; } } - private async githubRequest(path: string, signal: AbortSignal, allowRaw = false): Promise { - const url = new URL(path, 'https://api.github.com'); - const headers: Record = { - Accept: allowRaw ? 'application/vnd.github.raw' : 'application/vnd.github+json', - 'User-Agent': 'hautech-agents', - }; - if (this.githubToken) headers.Authorization = `Bearer ${this.githubToken}`; - const res = await this.fetchImpl(url, { headers, signal }); - if ([401, 403].includes(res.status)) { + private buildRefPatterns(ref: string): string[] { + const trimmed = ref.trim(); + if (!trimmed) return ['HEAD']; + if (trimmed.startsWith('refs/')) return [trimmed]; + const annotatedTag = `refs/tags/${trimmed}` + '^{}'; + const patterns = [ + trimmed, + `refs/heads/${trimmed}`, + `refs/tags/${trimmed}`, + annotatedTag, + ]; + return patterns; + } + + private buildGitRemote(repo: NormalizedRepository): string { + return `https://github.com/${repo.owner}/${repo.repo}.git`; + } + + private async ensureFlakePresent(repo: NormalizedRepository, commitSha: string, signal: AbortSignal): Promise { + const rawUrl = `https://raw.githubusercontent.com/${repo.owner}/${repo.repo}/${commitSha}/flake.nix`; + const res = await this.fetchImpl(rawUrl, { + headers: { + 'User-Agent': 'hautech-agents', + }, + signal, + }); + if (res.status === 404) { + throw new FetchErrorResponse(409, { error: 'non_flake_repo' }); + } + if (res.status === 401 || res.status === 403) { throw new FetchErrorResponse(401, { error: 'unauthorized_private_repo' }); } if (res.status >= 500) { throw new FetchErrorResponse(502, { error: 'github_error', status: res.status }); } - return res; + if (!res.ok) { + throw new FetchErrorResponse(500, { error: 'github_error', status: res.status }); + } + // Fully consume body to allow caller reuse of socket pool. 
+ await res.arrayBuffer(); } - private async parseGithubJson(res: Response): Promise> { - try { - const json = (await res.json()) as Record; - return json ?? {}; - } catch (_err) { - throw new FetchErrorResponse(502, { error: 'bad_github_json' }); + private async execGit(args: string[], signal: AbortSignal): Promise { + const env = { ...process.env, GIT_TERMINAL_PROMPT: '0' }; + return new Promise((resolve, reject) => { + execFile('git', args, { signal, env, windowsHide: true, maxBuffer: 5 * 1024 * 1024 }, (error, stdout, stderr) => { + if (error) { + const abortName = (error as Error).name; + const abortCode = (error as NodeJS.ErrnoException).code; + if (abortName === 'AbortError' || abortCode === 'ABORT_ERR') { + reject(error); + return; + } + const execError = error as NodeJS.ErrnoException; + const exitCode = typeof execError.code === 'number' ? execError.code : null; + reject(new GitCommandError(exitCode, stdout, stderr)); + return; + } + resolve(stdout); + }); + }); + } + + private handleGitRepositoryError(error: GitCommandError): never { + const stderr = error.stderr?.toLowerCase() ?? 
''; + if (/repository\s+not\s+found/.test(stderr) || /not\s+found/.test(stderr)) { + throw new FetchErrorResponse(404, { error: 'repo_not_found' }); }
+ if (/access\s+denied/.test(stderr) || /authentication\s+failed/.test(stderr) || /could\s+not\s+read\s+username/.test(stderr)) { + throw new FetchErrorResponse(401, { error: 'unauthorized_private_repo' }); + } + throw new FetchErrorResponse(502, { error: 'github_error', status: 502 }); + } +} - private mapGithubErrorStatus(status: number): number { - if (status >= 500) return 502; - if (status === 401 || status === 403) return 401; - if (status === 404) return 404; - return 500; +class GitCommandError extends Error { + constructor( + public readonly exitCode: number | null, + public readonly stdout: string, + public readonly stderr: string, + ) { + super('git_command_failed'); } } diff --git a/packages/platform-server/src/llm/llm.module.ts b/packages/platform-server/src/llm/llm.module.ts index f8c818d1c..53479d4bf 100644 --- a/packages/platform-server/src/llm/llm.module.ts +++ b/packages/platform-server/src/llm/llm.module.ts @@ -9,9 +9,7 @@ import { SummarizationLLMReducer } from './reducers/summarization.llm.reducer'; import { StaticLLMRouter } from './routers/static.llm.router'; import { ConditionalLLMRouter } from './routers/conditional.llm.router'; import { LLMProvisioner } from './provisioners/llm.provisioner'; -import { ConfigService } from '../core/services/config.service'; import { LiteLLMProvisioner } from './provisioners/litellm.provisioner'; -import { OpenAILLMProvisioner } from './provisioners/openai.provisioner'; import { CoreModule } from '../core/core.module'; import { EventsModule } from '../events/events.module'; import { LiteLLMKeyStore } from './provisioners/litellm.key.store'; @@ -21,21 +19,13 @@ import { LiteLLMKeyStore } from './provisioners/litellm.key.store'; providers: [ LiteLLMKeyStore, LiteLLMProvisioner, - OpenAILLMProvisioner, { provide: LLMProvisioner, - useFactory: async ( - cfg: 
ConfigService, - liteProvisioner: LiteLLMProvisioner, - openaiProvisioner: OpenAILLMProvisioner, - ) => { - if (cfg.llmProvider === 'openai') { - return openaiProvisioner; - } + useFactory: async (liteProvisioner: LiteLLMProvisioner) => { await liteProvisioner.init(); return liteProvisioner; }, - inject: [ConfigService, LiteLLMProvisioner, OpenAILLMProvisioner], + inject: [LiteLLMProvisioner], }, ConversationStateRepository, LoadLLMReducer, diff --git a/packages/platform-server/src/llm/provisioners/litellm.provisioner.ts b/packages/platform-server/src/llm/provisioners/litellm.provisioner.ts index 3489dab44..d08197857 100644 --- a/packages/platform-server/src/llm/provisioners/litellm.provisioner.ts +++ b/packages/platform-server/src/llm/provisioners/litellm.provisioner.ts @@ -1,7 +1,6 @@ import { LLM } from '@agyn/llm'; import { Inject, Injectable, Logger } from '@nestjs/common'; import OpenAI from 'openai'; -import os from 'node:os'; import { ConfigService } from '../../core/services/config.service'; import { LLMProvisioner } from './llm.provisioner'; import { LiteLLMKeyStore } from './litellm.key.store'; @@ -41,7 +40,7 @@ export class LiteLLMProvisioner extends LLMProvisioner { } this.keyStore = keyStore; this.fetchImpl = fetchImpl ?? globalThis.fetch.bind(globalThis); - this.keyAlias = this.resolveAlias(); + this.keyAlias = 'agyn_key'; } init(): Promise { @@ -374,21 +373,6 @@ export class LiteLLMProvisioner extends LLMProvisioner { return value?.replace(/(sk-[A-Za-z0-9_-]{6,})/g, '[REDACTED]') ?? value; } - private resolveAlias(): string { - const explicit = (process.env.LITELLM_KEY_ALIAS ?? '').trim(); - if (explicit) return explicit; - - const envName = (process.env.AGENTS_ENV ?? process.env.NODE_ENV ?? 
'local').replace(/\s+/g, '-'); - const deployment = - process.env.AGENTS_DEPLOYMENT || - process.env.DEPLOYMENT_ID || - process.env.HOSTNAME || - os.hostname() || - 'unknown'; - const normalizedDeployment = deployment.replace(/\s+/g, '-'); - return `agents/${envName}/${normalizedDeployment}`; - } - private describeError(error: unknown): string { if (!error) return 'unknown'; if (typeof error === 'string') return error; diff --git a/packages/platform-server/src/llm/provisioners/openai.provisioner.ts b/packages/platform-server/src/llm/provisioners/openai.provisioner.ts deleted file mode 100644 index 6ef7fc585..000000000 --- a/packages/platform-server/src/llm/provisioners/openai.provisioner.ts +++ /dev/null @@ -1,33 +0,0 @@ -import { LLM } from '@agyn/llm'; -import { Inject, Injectable } from '@nestjs/common'; -import OpenAI from 'openai'; -import { LLMProvisioner } from './llm.provisioner'; -import { ConfigService } from '../../core/services/config.service'; - -@Injectable() -export class OpenAILLMProvisioner extends LLMProvisioner { - private llm?: LLM; - constructor(@Inject(ConfigService) private readonly cfg: ConfigService) { - super(); - ConfigService.assertInitialized(cfg); - } - - async init(): Promise { - await this.getLLM(); - } - - async getLLM(): Promise { - if (this.llm) return this.llm; - - const apiKey = this.cfg.openaiApiKey; - if (!apiKey) throw new Error('openai_provider_missing_key'); - const baseUrl = this.cfg.openaiBaseUrl; - const client = new OpenAI({ apiKey, baseURL: baseUrl ?? 
undefined }); - this.llm = new LLM(client); - return this.llm; - } - - async teardown(): Promise { - this.llm = undefined; - } -} diff --git a/packages/platform-server/src/llm/services/context-items.utils.ts b/packages/platform-server/src/llm/services/context-items.utils.ts index 031f2652b..8cdd386cb 100644 --- a/packages/platform-server/src/llm/services/context-items.utils.ts +++ b/packages/platform-server/src/llm/services/context-items.utils.ts @@ -54,33 +54,12 @@ const NULL_CHAR = '\u0000'; type SanitizeField = 'contentText' | 'contentJson' | 'metadata' | 'payload'; -export class ContextItemNullByteGuardError extends Error { - constructor(public readonly info: { field: SanitizeField; path?: string }) { - super(`context_item.null_byte_guard: ${info.path ?? info.field}`); - this.name = 'ContextItemNullByteGuardError'; - } -} - -function isNullGuardEnabled(): boolean { - const raw = process.env.CONTEXT_ITEM_NULL_GUARD ?? process.env.CONTEXT_ITEM_NUL_GUARD; - if (!raw) return false; - return raw === '1' || raw.toLowerCase() === 'true'; -} - function sanitizeString( value: string, logger?: LoggerLike, context?: { field: SanitizeField; path?: string[] }, - guardOverride?: boolean, ): string { if (!value) return value; - const guardEnabled = guardOverride ?? isNullGuardEnabled(); - if (guardEnabled && value.includes(NULL_CHAR)) { - throw new ContextItemNullByteGuardError({ - field: context?.field ?? 'payload', - path: context?.path && context.path.length > 0 ? 
context.path.join('.') : undefined, - }); - } if (!value.includes(NULL_CHAR)) return value; const sanitized = value.split(NULL_CHAR).join(''); logger?.warn?.('context_items.null_bytes_stripped', { @@ -96,16 +75,15 @@ function sanitizePrismaJson( logger: LoggerLike | undefined, field: Exclude, path: string[] = [], - guardOverride?: boolean, ): unknown { if (typeof value === 'string') { - return sanitizeString(value, logger, { field, path }, guardOverride); + return sanitizeString(value, logger, { field, path }); } if (Array.isArray(value)) { let mutated = false; const next = value.map((entry, index) => { - const sanitized = sanitizePrismaJson(entry, logger, field, path.concat(String(index)), guardOverride); + const sanitized = sanitizePrismaJson(entry, logger, field, path.concat(String(index))); if (sanitized !== entry) mutated = true; return sanitized; }); @@ -121,7 +99,7 @@ function sanitizePrismaJson( const entries = Object.entries(value as Record) as Array<[string, unknown]>; const out: Record = {}; for (const [key, entry] of entries) { - const sanitized = sanitizePrismaJson(entry, logger, field, path.concat(key), guardOverride); + const sanitized = sanitizePrismaJson(entry, logger, field, path.concat(key)); out[key] = sanitized; if (sanitized !== entry) mutated = true; } @@ -180,17 +158,8 @@ export function deepSanitizeCreateData( return sanitizeContextItemPayload(data, logger); } -type SanitizePayloadOptions = { - guard?: boolean; -}; - -export function sanitizeContextItemPayload( - payload: TPayload, - logger?: LoggerLike, - options?: SanitizePayloadOptions, -): TPayload { +export function sanitizeContextItemPayload(payload: TPayload, logger?: LoggerLike): TPayload { if (payload === null || typeof payload !== 'object') return payload; - const guard = options?.guard; const seen = new WeakMap(); const resolveField = (path: string[]): SanitizeField => { @@ -203,7 +172,7 @@ export function sanitizeContextItemPayload( function sanitizeValue(value: unknown, path: 
string[]): unknown { if (typeof value === 'string') { - return sanitizeString(value, logger, { field: resolveField(path), path }, guard); + return sanitizeString(value, logger, { field: resolveField(path), path }); } if (Array.isArray(value)) { diff --git a/packages/platform-server/src/nodes/tools/memory/memory.tool.ts b/packages/platform-server/src/nodes/tools/memory/memory.tool.ts index 9d9249409..aae9b0f6b 100644 --- a/packages/platform-server/src/nodes/tools/memory/memory.tool.ts +++ b/packages/platform-server/src/nodes/tools/memory/memory.tool.ts @@ -137,8 +137,6 @@ export class UnifiedMemoryFunctionTool extends FunctionTool { if (githubToken && this.hasFlakeRepoConfig()) { const hasToken = !!envMerged && typeof envMerged === 'object' && 'GITHUB_TOKEN' in envMerged; if (!hasToken) { - envMerged = { ...(envMerged ?? {}), GITHUB_TOKEN: githubToken } as Record; + const nextEnv: Record = { ...(envMerged ?? {}), GITHUB_TOKEN: githubToken }; + envMerged = nextEnv; } } @@ -180,11 +181,18 @@ export class WorkspaceNode extends Node { if (!hasNixConfig && ncpsEnabled && !!ncpsUrl && keys.length > 0) { const joined = keys.join(' '); const nixConfig = `substituters = ${ncpsUrl} https://cache.nixos.org\ntrusted-public-keys = ${joined} cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=`; - envMerged = { ...(envMerged || {}), NIX_CONFIG: nixConfig } as Record; + const nextEnv: Record = { ...(envMerged || {}), NIX_CONFIG: nixConfig }; + envMerged = nextEnv; nixConfigInjected = true; } - const envWithDinD = enableDinD ? { ...(envMerged || {}), DOCKER_HOST: DOCKER_HOST_ENV } : envMerged || undefined; + let envWithDinD: Record | undefined; + if (enableDinD) { + const nextEnv: Record = { ...(envMerged ?? 
{}), DOCKER_HOST: DOCKER_HOST_ENV }; + envWithDinD = nextEnv; + } else { + envWithDinD = envMerged || undefined; + } const networkAlias = this.sanitizeNetworkAlias(threadId); const cpuLimitNano = this.normalizeCpuLimit(this.config?.cpu_limit); const memoryLimitBytes = this.normalizeMemoryLimit(this.config?.memory_limit); diff --git a/packages/platform-server/src/settings/llm/llmSettings.service.ts b/packages/platform-server/src/settings/llm/llmSettings.service.ts index 32de85da6..43538533d 100644 --- a/packages/platform-server/src/settings/llm/llmSettings.service.ts +++ b/packages/platform-server/src/settings/llm/llmSettings.service.ts @@ -355,7 +355,7 @@ export class LLMSettingsService { const masterKey = this.config.litellmMasterKey; const hasMasterKey = Boolean(masterKey); const sanitizedBaseUrl = baseUrlRaw ? redactBaseUrl(baseUrlRaw) : undefined; - const configured = Boolean(provider === 'litellm' && baseUrlRaw && masterKey); + const configured = Boolean(baseUrlRaw && masterKey); const status: LiteLLMAdminStatus = { configured, baseUrl: sanitizedBaseUrl, @@ -363,12 +363,6 @@ export class LLMSettingsService { provider, }; - if (provider !== 'litellm') { - status.configured = false; - status.reason = 'provider_mismatch'; - return status; - } - if (!baseUrlRaw || !masterKey) { status.configured = false; status.reason = 'missing_env'; diff --git a/packages/platform-server/src/settings/llm/types.ts b/packages/platform-server/src/settings/llm/types.ts index a1ca6ba6f..1d9f4c344 100644 --- a/packages/platform-server/src/settings/llm/types.ts +++ b/packages/platform-server/src/settings/llm/types.ts @@ -50,5 +50,5 @@ export type LiteLLMAdminStatus = { hasMasterKey: boolean; provider: string; adminReachable?: boolean; - reason?: 'missing_env' | 'unauthorized' | 'unreachable' | 'provider_mismatch'; + reason?: 'missing_env' | 'unauthorized' | 'unreachable'; }; diff --git a/packages/platform-ui/README.md b/packages/platform-ui/README.md index db3813731..c812ad0ab 100644 
--- a/packages/platform-ui/README.md +++ b/packages/platform-ui/README.md @@ -47,7 +47,7 @@ Docs Model field (Agent) - The Agent static configuration view uses a free-text input for model. -- Enter any valid OpenAI/LiteLLM model identifier or LiteLLM alias. +- Enter any valid LiteLLM provider identifier or alias (e.g., openai/gpt-4o-mini). - Examples: `openai/gpt-4o-mini`, `claude-3-5-sonnet`, or a custom alias like `gpt-5`. - The UI trims whitespace and requires a non-empty value; availability is checked at runtime by the provider. diff --git a/packages/platform-ui/src/api/modules/llmSettings.ts b/packages/platform-ui/src/api/modules/llmSettings.ts index 587aef5b8..b4eabc76d 100644 --- a/packages/platform-ui/src/api/modules/llmSettings.ts +++ b/packages/platform-ui/src/api/modules/llmSettings.ts @@ -58,7 +58,7 @@ export type LiteLLMAdminStatus = { hasMasterKey: boolean; provider: string; adminReachable?: boolean; - reason?: 'missing_env' | 'unauthorized' | 'unreachable' | 'provider_mismatch'; + reason?: 'missing_env' | 'unauthorized' | 'unreachable'; }; export function isLiteLLMMissingConfigError(error: unknown): error is AxiosError { diff --git a/packages/platform-ui/src/components/nix/NixRepoInstallSection.tsx b/packages/platform-ui/src/components/nix/NixRepoInstallSection.tsx index d9262ba07..2bfcc343c 100644 --- a/packages/platform-ui/src/components/nix/NixRepoInstallSection.tsx +++ b/packages/platform-ui/src/components/nix/NixRepoInstallSection.tsx @@ -23,7 +23,7 @@ const REPO_ERROR_MESSAGES: Record = { repo_not_found: 'Repository not found on GitHub.', ref_not_found: 'Branch, tag, or commit could not be resolved.', non_flake_repo: 'flake.nix not found in the repository at that ref.', - unauthorized_private_repo: 'Configure a GitHub token to access this repository.', + unauthorized_private_repo: 'Private GitHub repositories are not supported. 
Use a public repository.', validation_error: 'Invalid repository, ref, or attribute.', github_error: 'GitHub API error while resolving repository.', timeout: 'Request timed out contacting GitHub.', diff --git a/packages/platform-ui/src/features/llmSettings/SettingsLlmContainer.tsx b/packages/platform-ui/src/features/llmSettings/SettingsLlmContainer.tsx index 660e2f046..261026745 100644 --- a/packages/platform-ui/src/features/llmSettings/SettingsLlmContainer.tsx +++ b/packages/platform-ui/src/features/llmSettings/SettingsLlmContainer.tsx @@ -134,9 +134,6 @@ export function SettingsLlmContainer(): ReactElement { const vars = missingEnvKeys?.join(' and ') ?? 'LITELLM_BASE_URL and LITELLM_MASTER_KEY'; return `LiteLLM administration requires ${vars}. Update the platform server environment and restart.`; } - if (adminStatusReason === 'provider_mismatch') { - return 'Set LLM_PROVIDER=litellm on the platform server to enable LiteLLM administration.'; - } if (adminStatusReason === 'unauthorized') { return 'LiteLLM admin authentication failed. Verify the LiteLLM master key.'; } @@ -171,22 +168,6 @@ export function SettingsLlmContainer(): ReactElement { ); } - if (adminStatusReason === 'provider_mismatch') { - return ( -
-

LiteLLM administration is disabled because the platform server is not running in LiteLLM mode.

-

- Update the environment to include LLM_PROVIDER=litellm and restart the server. -

-

- - View the server LiteLLM admin setup guide - - . -

-
- ); - } if (adminStatusReason === 'unauthorized') { return (
diff --git a/packages/platform-ui/src/pages/__tests__/settings-llm.test.tsx b/packages/platform-ui/src/pages/__tests__/settings-llm.test.tsx index 85828e745..c9777a2fd 100644 --- a/packages/platform-ui/src/pages/__tests__/settings-llm.test.tsx +++ b/packages/platform-ui/src/pages/__tests__/settings-llm.test.tsx @@ -1023,17 +1023,17 @@ describe('Settings/LLM page', () => { expect(addModelButton).toBeDisabled(); }); - it('disables admin actions when the platform is running without LiteLLM provider', async () => { + it('disables admin actions when LiteLLM config is missing', async () => { const user = userEvent.setup({ pointerEventsCheck: 0 }); server.use( http.get(abs('/api/settings/llm/admin-status'), () => HttpResponse.json({ configured: false, - baseUrl: 'http://127.0.0.1:4000', - hasMasterKey: true, - provider: 'openai', - reason: 'provider_mismatch', + baseUrl: undefined, + hasMasterKey: false, + provider: 'litellm', + reason: 'missing_env', }), ), http.get(abs('/api/settings/llm/providers'), () => HttpResponse.json([])), @@ -1051,9 +1051,8 @@ describe('Settings/LLM page', () => { const banner = await screen.findByRole('alert'); expect(await within(banner).findByText('LiteLLM administration unavailable')).toBeInTheDocument(); expect( - await within(banner).findByText('LiteLLM administration is disabled because the platform server is not running in LiteLLM mode.'), + await within(banner).findByText('LiteLLM administration requires LITELLM_BASE_URL and LITELLM_MASTER_KEY. Update the platform server environment and restart.'), ).toBeInTheDocument(); - expect(await within(banner).findByText('LLM_PROVIDER=litellm', { selector: 'code' })).toBeInTheDocument(); const addCredentialButton = await screen.findByRole('button', { name: 'Add Credential' }); expect(addCredentialButton).toBeDisabled();