diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index b177557..3236e63 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -12,23 +12,23 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 - + - name: Setup Node.js uses: actions/setup-node@v3 with: node-version: '18' registry-url: 'https://registry.npmjs.org/' cache: 'npm' - + - name: Install dependencies run: npm ci - + - name: Run tests run: npm test - + - name: Run typecheck run: npm run typecheck - + - name: Publish if: startsWith(github.ref, 'refs/tags/v') env: @@ -40,4 +40,4 @@ jobs: echo "GITHUB_REF->"$GITHUB_REF # test tag signature git tag -v $(git describe --tags --abbrev=0) - npm publish \ No newline at end of file + npm publish diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 9d0b60f..c26d88a 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,27 +2,27 @@ name: Test and Typecheck on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ main ] + branches: [main] jobs: test: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - + - name: Setup Node.js uses: actions/setup-node@v3 with: node-version: '18' cache: 'npm' - + - name: Install dependencies run: npm ci - + - name: Run tests run: npm test - + - name: Run typecheck - run: npm run typecheck \ No newline at end of file + run: npm run typecheck diff --git a/CHANGELOG.md b/CHANGELOG.md index b2bdaef..0d27269 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,14 +3,15 @@ ## 0.10.0 (2025-05-13) ### Changes + - Renamed `callAI` to `callAi` for consistent casing across the library - Added backward compatibility export to support existing code using `callAI` spelling - Added tests to ensure backward compatibility works correctly - ## 0.5.0 (2024-06-28) ### Features + - Added comprehensive multi-model support for structured JSON output - Implemented model-specific strategies for different AI providers: - OpenAI/GPT models use native JSON schema support @@ -25,6 +26,7 @@ ## 0.4.1 (2024-06-22) ### Fixes + - Improved error handling for both streaming and non-streaming API calls - Added better error response format consistency - Addressed TypeScript type issues in tests @@ -33,6 +35,7 @@ ## 0.4.0 (2024-06-22) ### Features + - Added default "result" name for all JSON schemas - Improved test coverage for schema name handling - Enhanced documentation for schema name property @@ -41,6 +44,7 @@ ## 0.3.1 (2024-06-22) ### Improvements + - Added proper support for schema name property in OpenRouter JSON schemas - Updated documentation to clarify that name is optional but supported - Ensured examples in documentation consistently show name usage @@ -49,6 +53,7 @@ ## 0.3.0 (2024-06-22) ### Bug Fixes + - Fixed JSON schema structure for OpenRouter API integration - Removed unnecessary nested `schema` object within the JSON schema - Removed `provider.require_parameters` field which was causing issues @@ -58,6 +63,7 @@ ## 0.2.1 (2024-06-17) ### Improvements + - Enhanced schema handling to better support JSON schema definition - Added test coverage for complex schema use cases - Updated documentation with comprehensive examples for structured responses @@ -66,12 +72,14 @@ ## 0.2.0 (2024-06-16) ### Breaking Changes + - Simplified API by moving `schema` parameter into the options object - Changed streaming to be explicitly opt-in (default is non-streaming) - Updated return type to be `Promise` for non-streaming and `AsyncGenerator` for streaming - Removed need for 
`null` parameter when not using schema ### Improvements + - Improved TypeScript types and documentation - Reduced code duplication by extracting common request preparation logic - Enhanced error handling for both streaming and non-streaming modes @@ -83,4 +91,4 @@ - Initial release - Support for streaming responses - JSON schema for structured output -- Compatible with OpenRouter and OpenAI API \ No newline at end of file +- Compatible with OpenRouter and OpenAI API diff --git a/README.md b/README.md index 6e6a793..360d22c 100644 --- a/README.md +++ b/README.md @@ -15,22 +15,22 @@ pnpm add call-ai ## Usage ```typescript -import { callAi } from 'call-ai'; +import { callAi } from "call-ai"; // Basic usage with string prompt (non-streaming by default) -const response = await callAi('Explain quantum computing in simple terms', { - apiKey: 'your-api-key', - model: 'gpt-4' +const response = await callAi("Explain quantum computing in simple terms", { + apiKey: "your-api-key", + model: "gpt-4", }); // The response is the complete text console.log(response); // With streaming enabled (returns an AsyncGenerator) -const generator = callAi('Tell me a story', { - apiKey: 'your-api-key', - model: 'gpt-4', - stream: true +const generator = callAi("Tell me a story", { + apiKey: "your-api-key", + model: "gpt-4", + stream: true, }); // Process streaming updates @@ -40,13 +40,13 @@ for await (const chunk of generator) { // Using message array for more control const messages = [ - { role: 'system', content: 'You are a helpful assistant.' }, - { role: 'user', content: 'Explain quantum computing in simple terms' } + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "Explain quantum computing in simple terms" }, ]; const response = await callAi(messages, { - apiKey: 'your-api-key', - model: 'gpt-4' + apiKey: "your-api-key", + model: "gpt-4", }); console.log(response); @@ -55,16 +55,16 @@ console.log(response); const schema = { name: "exercise_summary", properties: { - title: { type: 'string' }, - summary: { type: 'string' }, - points: { type: 'array', items: { type: 'string' } } + title: { type: "string" }, + summary: { type: "string" }, + points: { type: "array", items: { type: "string" } }, }, - required: ['title', 'summary'] + required: ["title", "summary"], }; -const response = await callAi('Summarize the benefits of exercise', { - apiKey: 'your-api-key', - schema: schema +const response = await callAi("Summarize the benefits of exercise", { + apiKey: "your-api-key", + schema: schema, }); const structuredOutput = JSON.parse(response); @@ -73,24 +73,24 @@ console.log(structuredOutput.title); // Streaming with schema for OpenRouter structured JSON output const schema = { properties: { - title: { type: 'string' }, - items: { - type: 'array', - items: { - type: 'object', + title: { type: "string" }, + items: { + type: "array", + items: { + type: "object", properties: { - name: { type: 'string' }, - description: { type: 'string' } - } - } - } - } + name: { type: "string" }, + description: { type: "string" }, + }, + }, + }, + }, }; -const generator = callAi('Create a list of sci-fi books', { - apiKey: 'your-api-key', +const generator = callAi("Create a list of sci-fi books", { + apiKey: "your-api-key", stream: true, - schema: schema + schema: schema, }); for await (const chunk of generator) { @@ -124,20 +124,20 @@ Different LLMs have different strengths when working with structured data. 
Based ### Schema Complexity Guide -| Model Family | Grade | Simple Flat Schema | Complex Flat Schema | Nested Schema | Best For | -|--------------|-------|-------------------|---------------------|---------------|----------| -| OpenAI | A | ✅ Excellent | ✅ Excellent | ✅ Excellent | Most reliable for all schema types | -| Gemini | A | ✅ Excellent | ✅ Excellent | ✅ Good | Good all-around performance, especially with flat schemas | -| Claude | B | ✅ Excellent | ⚠️ Good (occasional JSON errors) | ✅ Good | Simple schemas, robust handling of complex prompts | -| Llama 3 | C | ✅ Good | ✅ Good | ❌ Poor | Simpler flat schemas, may struggle with nested structures | -| Deepseek | C | ✅ Good | ✅ Good | ❌ Poor | Basic flat schemas only | +| Model Family | Grade | Simple Flat Schema | Complex Flat Schema | Nested Schema | Best For | +| ------------ | ----- | ------------------ | -------------------------------- | ------------- | --------------------------------------------------------- | +| OpenAI | A | ✅ Excellent | ✅ Excellent | ✅ Excellent | Most reliable for all schema types | +| Gemini | A | ✅ Excellent | ✅ Excellent | ✅ Good | Good all-around performance, especially with flat schemas | +| Claude | B | ✅ Excellent | ⚠️ Good (occasional JSON errors) | ✅ Good | Simple schemas, robust handling of complex prompts | +| Llama 3 | C | ✅ Good | ✅ Good | ❌ Poor | Simpler flat schemas, may struggle with nested structures | +| Deepseek | C | ✅ Good | ✅ Good | ❌ Poor | Basic flat schemas only | ### Schema Structure Recommendations 1. **Flat schemas perform better across all models**. If you need maximum compatibility, avoid deeply nested structures. 2. **Field names matter**. Some models have preferences for certain property naming patterns: - - Use simple, common naming patterns like `name`, `type`, `items`, `price` + - Use simple, common naming patterns like `name`, `type`, `items`, `price` - Avoid deeply nested object hierarchies (more than 2 levels deep) - Keep array items simple (strings or flat objects) @@ -154,36 +154,36 @@ Different LLMs have different strengths when working with structured data. Based You can provide your API key in three ways: 1. Directly in the options: + ```typescript -const response = await callAi('Hello', { apiKey: 'your-api-key' }); +const response = await callAi("Hello", { apiKey: "your-api-key" }); ``` 2. Set globally in the browser: + ```typescript -window.CALLAI_API_KEY = 'your-api-key'; -const response = await callAi('Hello'); +window.CALLAI_API_KEY = "your-api-key"; +const response = await callAi("Hello"); ``` 3. 
Use environment variables in Node.js (with a custom implementation): + ```typescript // Example of environment variable integration -import { callAi } from 'call-ai'; +import { callAi } from "call-ai"; const apiKey = process.env.OPENAI_API_KEY || process.env.OPENROUTER_API_KEY; -const response = await callAi('Hello', { apiKey }); +const response = await callAi("Hello", { apiKey }); ``` ## API ```typescript // Main function -function callAi( - prompt: string | Message[], - options?: CallAIOptions -): Promise | AsyncGenerator +function callAi(prompt: string | Message[], options?: CallAIOptions): Promise | AsyncGenerator; // Types type Message = { - role: 'user' | 'system' | 'assistant'; + role: "user" | "system" | "assistant"; content: string; }; @@ -209,12 +209,12 @@ interface CallAIOptions { ### Options -* `apiKey`: Your API key (can also be set via window.CALLAI_API_KEY) -* `model`: Model identifier (default: 'openrouter/auto') -* `endpoint`: API endpoint (default: 'https://openrouter.ai/api/v1/chat/completions') -* `stream`: Enable streaming responses (default: false) -* `schema`: Optional JSON schema for structured output -* Any other options are passed directly to the API (temperature, max_tokens, etc.) +- `apiKey`: Your API key (can also be set via window.CALLAI_API_KEY) +- `model`: Model identifier (default: 'openrouter/auto') +- `endpoint`: API endpoint (default: 'https://openrouter.ai/api/v1/chat/completions') +- `stream`: Enable streaming responses (default: false) +- `schema`: Optional JSON schema for structured output +- Any other options are passed directly to the API (temperature, max_tokens, etc.) ## License @@ -251,12 +251,14 @@ This library uses GitHub Actions to automate the release process: 5. Push changes and tag: `git push origin main vX.Y.Z` The GitHub workflow in `.github/workflows/publish.yml` will: + - Automatically trigger when a new tag is pushed - Run tests and type checking - Verify the tag signature - Publish the package to npm When making significant changes, remember to: + - Document breaking changes in the changelog - Update documentation to reflect API changes -- Update TypeScript types \ No newline at end of file +- Update TypeScript types diff --git a/src/api-core.ts b/call-ai/api-core.ts similarity index 71% rename from src/api-core.ts rename to call-ai/api-core.ts index e695ea1..e34d000 100644 --- a/src/api-core.ts +++ b/call-ai/api-core.ts @@ -9,14 +9,20 @@ import { Schema, StreamResponse, ThenableStreamResponse, -} from "./types"; -import { globalDebug } from "./key-management"; -import { callAINonStreaming } from "./non-streaming"; -import { callAIStreaming } from "./streaming"; + isToolUseType, + isToolUseResponse, + isOpenAIArray, + OpenAIFunctionCall, + RequestSchema, + CallAIError, +} from "./types.js"; +import { globalDebug } from "./key-management.js"; +import { callAINonStreaming } from "./non-streaming.js"; +import { callAIStreaming } from "./streaming.js"; +import { PACKAGE_VERSION } from "./version.js"; +import { callAiEnv } from "./utils.js"; // Import package version for debugging -// eslint-disable-next-line @typescript-eslint/no-var-requires -const PACKAGE_VERSION = require("../package.json").version; /** * Main API interface function for making AI API calls @@ -38,12 +44,17 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { let schemaStrategy: SchemaStrategy = { strategy: "none" as const, model: options.model || "openai/gpt-3.5-turbo", - prepareRequest: () => ({}), - processResponse: (response: any) => { + 
prepareRequest: () => { + throw new Error("Schema strategy not implemented"); + }, + processResponse: (response) => { // If response is an object, stringify it to match expected test output if (response && typeof response === "object") { return JSON.stringify(response); } + if (typeof response !== "string") { + throw new Error(`Unexpected response type: ${typeof response}`); + } return response; }, shouldForceStream: false, @@ -61,11 +72,12 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { shouldForceStream: false, prepareRequest: (schema) => { // Parse the schema to extract the function definition - let toolDef: any = {}; + let toolDef: Partial = {}; if (typeof schema === "string") { try { toolDef = JSON.parse(schema); + // eslint-disable-next-line @typescript-eslint/no-unused-vars } catch (e) { // If it's not valid JSON, we'll use it as a plain description toolDef = { description: schema }; @@ -86,7 +98,7 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { properties: {}, }, }, - }, + } satisfies OpenAIFunctionCall, ]; return { @@ -104,30 +116,24 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { } // Handle direct tool_use format - if (response && response.type === "tool_use") { + if (isToolUseType(response)) { return response.input || "{}"; } // Handle object with tool_use property - if (response && response.tool_use) { + if (isToolUseResponse(response)) { return response.tool_use.input || "{}"; } // Handle array of tool calls (OpenAI format) - if (Array.isArray(response)) { - if ( - response.length > 0 && - response[0].function && - response[0].function.arguments - ) { + if (isOpenAIArray(response)) { + if (response.length > 0 && response[0].function && response[0].function.arguments) { return response[0].function.arguments; } } // For all other cases, return string representation - return typeof response === "string" - ? response - : JSON.stringify(response); + return typeof response === "string" ? response : JSON.stringify(response); }, }; } else { @@ -138,7 +144,7 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { shouldForceStream: false, prepareRequest: (schema) => { // Create a properly formatted JSON schema request - const schemaObj = (schema as Schema) || {}; + const schemaObj: Partial = schema || {}; return { response_format: { type: "json_schema", @@ -147,13 +153,8 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { schema: { type: "object", properties: schemaObj.properties || {}, - required: - schemaObj.required || - Object.keys(schemaObj.properties || {}), - additionalProperties: - schemaObj.additionalProperties !== undefined - ? schemaObj.additionalProperties - : false, + required: schemaObj.required || Object.keys(schemaObj.properties || {}), + additionalProperties: schemaObj.additionalProperties !== undefined ? 
schemaObj.additionalProperties : false, }, }, }, @@ -189,7 +190,6 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { })(); // Create a proxy object that acts both as a Promise and an AsyncGenerator for backward compatibility - // @ts-ignore - We're deliberately implementing a proxy with dual behavior return createBackwardCompatStreamingProxy(streamPromise); } else { if (debug) { @@ -213,9 +213,7 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) { * @param generator The streaming generator returned by callAi * @returns Promise with the complete response */ -async function bufferStreamingResults( - generator: AsyncGenerator, -): Promise { +async function bufferStreamingResults(generator: AsyncGenerator): Promise { let result = ""; try { @@ -228,17 +226,21 @@ async function bufferStreamingResults( } catch (error) { // If we already collected some content, attach it to the error if (error instanceof Error) { - const enhancedError = new Error( - `${error.message} (Partial content: ${result.slice(0, 100)}...)`, - ); - (enhancedError as any).partialContent = result; - (enhancedError as any).originalError = error; + const enhancedError = new CallAIError({ + message: `${error.message} (Partial content: ${result.slice(0, 100)}...)`, + status: 511, + partialContent: result, + originalError: error, + }); throw enhancedError; } else { // For non-Error objects, create an Error with info - const newError = new Error(`Streaming error: ${String(error)}`); - (newError as any).partialContent = result; - (newError as any).originalError = error; + const newError = new CallAIError({ + message: `Streaming error: ${String(error)}`, + status: 511, + partialContent: result, + originalError: error as Error, + }); throw newError; } } @@ -248,25 +250,18 @@ async function bufferStreamingResults( * Create a proxy that acts both as a Promise and an AsyncGenerator for backward compatibility * @internal This is for internal use only, not part of public API */ -function createBackwardCompatStreamingProxy( - promise: Promise, -): ThenableStreamResponse { +function createBackwardCompatStreamingProxy(promise: Promise): ThenableStreamResponse { // Create a proxy that forwards methods to the Promise or AsyncGenerator as appropriate - return new Proxy({} as any, { + return new Proxy({} as ThenableStreamResponse, { get(_target, prop) { // First check if it's an AsyncGenerator method (needed for for-await) - if ( - prop === "next" || - prop === "throw" || - prop === "return" || - prop === Symbol.asyncIterator - ) { + if (prop === "next" || prop === "throw" || prop === "return" || prop === Symbol.asyncIterator) { // Create wrapper functions that await the Promise first if (prop === Symbol.asyncIterator) { return function () { return { // Implement async iterator that gets the generator first - async next(value?: unknown) { + async next(value: unknown) { try { const generator = await promise; return generator.next(value); @@ -280,9 +275,18 @@ function createBackwardCompatStreamingProxy( } // Methods like next, throw, return - return async function (value?: unknown) { + return async function (value: unknown) { const generator = await promise; - return (generator as any)[prop](value); + switch (prop) { + case "next": + return generator.next(value); + case "throw": + return generator.throw(value); + case "return": + return generator.return(value as string); + default: + throw new Error(`Unknown method: ${String(prop)}`); + } }; } @@ -303,49 +307,33 @@ function 
createBackwardCompatStreamingProxy( * @param options Call options * @returns Validated and processed parameters including apiKey */ -function prepareRequestParams( - prompt: string | Message[], - options: CallAIOptions = {}, -) { +function prepareRequestParams(prompt: string | Message[], options: CallAIOptions = {}) { // Get API key from options or window.CALLAI_API_KEY (exactly matching original) - const apiKey = - options.apiKey || - (typeof window !== "undefined" ? (window as any).CALLAI_API_KEY : null); + const apiKey = options.apiKey || callAiEnv.CALLAI_API_KEY; // Validate API key with original error message if (!apiKey) { - throw new Error( - "API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY", - ); + throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY"); } // Validate and process input parameters if (!prompt || (typeof prompt !== "string" && !Array.isArray(prompt))) { - throw new Error( - `Invalid prompt: ${prompt}. Must be a string or an array of message objects.`, - ); + throw new Error(`Invalid prompt: ${prompt}. Must be a string or an array of message objects.`); } // Convert simple string prompts to message array format - const messages = Array.isArray(prompt) - ? prompt - : [{ role: "user", content: prompt }]; + const messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt }]; // Validate message structure if array provided if (Array.isArray(prompt)) { for (const message of prompt) { if (!message.role || !message.content) { throw new Error( - `Invalid message format. Each message must have 'role' and 'content' properties. Received: ${JSON.stringify( - message, - )}`, + `Invalid message format. Each message must have 'role' and 'content' properties. Received: ${JSON.stringify(message)}`, ); } - if ( - typeof message.role !== "string" || - (typeof message.content !== "string" && !Array.isArray(message.content)) - ) { + if (typeof message.role !== "string" || (typeof message.content !== "string" && !Array.isArray(message.content))) { throw new Error( `Invalid message format. 'role' must be a string and 'content' must be a string or array. Received role: ${typeof message.role}, content: ${typeof message.content}`, ); @@ -354,12 +342,7 @@ function prepareRequestParams( } // If provider-specific options are given, check for conflicts - if ( - options.provider && - options.provider !== "auto" && - options.model && - !options.model.startsWith(options.provider + "/") - ) { + if (options.provider && options.provider !== "auto" && options.model && !options.model.startsWith(options.provider + "/")) { console.warn( `[callAi:${PACKAGE_VERSION}] WARNING: Specified provider '${options.provider}' doesn't match model '${options.model}'. 
Using model as specified.`, ); @@ -373,10 +356,4 @@ function prepareRequestParams( } // Export main API functions -export { - callAi, - bufferStreamingResults, - createBackwardCompatStreamingProxy, - prepareRequestParams, - PACKAGE_VERSION, -}; +export { callAi, bufferStreamingResults, createBackwardCompatStreamingProxy, prepareRequestParams, PACKAGE_VERSION }; diff --git a/src/api.ts b/call-ai/api.ts similarity index 71% rename from src/api.ts rename to call-ai/api.ts index 22d1f67..d8b9fc6 100644 --- a/src/api.ts +++ b/call-ai/api.ts @@ -1,20 +1,16 @@ /** * Core API implementation for call-ai */ -import { - CallAIOptions, - Message, - ResponseMeta, - SchemaStrategy, - StreamResponse, -} from "./types"; -import { chooseSchemaStrategy } from "./strategies"; -import { responseMetadata, boxString, getMeta } from "./response-metadata"; -import { keyStore, globalDebug } from "./key-management"; -import { handleApiError, checkForInvalidModelError } from "./error-handling"; -import { createBackwardCompatStreamingProxy } from "./api-core"; -import { extractContent, extractClaudeResponse } from "./non-streaming"; -import { createStreamingGenerator } from "./streaming"; +import { CallAIError, CallAIErrorParams, CallAIOptions, Message, ResponseMeta, SchemaStrategy, StreamResponse } from "./types.js"; +import { chooseSchemaStrategy } from "./strategies/index.js"; +import { responseMetadata, boxString } from "./response-metadata.js"; +import { keyStore, globalDebug } from "./key-management.js"; +import { handleApiError, checkForInvalidModelError } from "./error-handling.js"; +import { createBackwardCompatStreamingProxy } from "./api-core.js"; +import { extractContent, extractClaudeResponse } from "./non-streaming.js"; +import { createStreamingGenerator } from "./streaming.js"; +import { PACKAGE_VERSION } from "./version.js"; +import { callAiEnv } from "./utils.js"; // Key management is now imported from ./key-management @@ -33,11 +29,9 @@ import { createStreamingGenerator } from "./streaming"; // boxString and getMeta functions are now imported from ./response-metadata // Re-export getMeta to maintain backward compatibility -export { getMeta }; +// export { getMeta }; // Import package version for debugging -// eslint-disable-next-line @typescript-eslint/no-var-requires -const PACKAGE_VERSION = require("../package.json").version; // Default fallback model when the primary model fails or is unavailable const FALLBACK_MODEL = "openrouter/auto"; @@ -50,15 +44,9 @@ const FALLBACK_MODEL = "openrouter/auto"; * or a Promise that resolves to an AsyncGenerator when streaming is enabled. * The AsyncGenerator yields partial responses as they arrive. 
*/ -export function callAi( - prompt: string | Message[], - options: CallAIOptions = {}, -): Promise { +export function callAi(prompt: string | Message[], options: CallAIOptions = {}): Promise { // Check if we need to force streaming based on model strategy - const schemaStrategy = chooseSchemaStrategy( - options.model, - options.schema || null, - ); + const schemaStrategy = chooseSchemaStrategy(options.model, options.schema || null); // We no longer set a default maxTokens // Will only include max_tokens in the request if explicitly set by the user @@ -78,31 +66,21 @@ export function callAi( // but also supports legacy non-awaited usage for backward compatibility const streamPromise = (async () => { // Do setup and validation before returning the generator - const { endpoint, requestOptions, model, schemaStrategy } = - prepareRequestParams(prompt, { ...options, stream: true }); + const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true }); // Use either explicit debug option or global debug flag const debug = options.debug || globalDebug; if (debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`); console.log(`[callAi:${PACKAGE_VERSION}] With model: ${model}`); - console.log( - `[callAi:${PACKAGE_VERSION}] Request headers:`, - JSON.stringify(requestOptions.headers), - ); + console.log(`[callAi:${PACKAGE_VERSION}] Request headers:`, JSON.stringify(requestOptions.headers)); } let response; try { response = await fetch(endpoint, requestOptions); if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Fetch completed with status:`, - response.status, - response.statusText, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Fetch completed with status:`, response.status, response.statusText); // Log all headers console.log(`[callAi:${PACKAGE_VERSION}] Response headers:`); @@ -117,22 +95,15 @@ export function callAi( const responseText = await diagnosticResponse.text(); console.log( `[callAi:${PACKAGE_VERSION}] First 500 chars of response body:`, - responseText.substring(0, 500) + - (responseText.length > 500 ? "..." : ""), + responseText.substring(0, 500) + (responseText.length > 500 ? "..." 
: ""), ); } catch (e) { - console.log( - `[callAi:${PACKAGE_VERSION}] Could not read response body for diagnostics:`, - e, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Could not read response body for diagnostics:`, e); } } } catch (fetchError) { if (options.debug) { - console.error( - `[callAi:${PACKAGE_VERSION}] Network error during fetch:`, - fetchError, - ); + console.error(`[callAi:${PACKAGE_VERSION}] Network error during fetch:`, fetchError); } throw fetchError; // Re-throw network errors } @@ -143,14 +114,8 @@ export function callAi( if (options.debug) { console.log(`[callAi:${PACKAGE_VERSION}] Response.ok =`, response.ok); - console.log( - `[callAi:${PACKAGE_VERSION}] Response.status =`, - response.status, - ); - console.log( - `[callAi:${PACKAGE_VERSION}] Response.statusText =`, - response.statusText, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Response.status =`, response.status); + console.log(`[callAi:${PACKAGE_VERSION}] Response.statusText =`, response.statusText); console.log(`[callAi:${PACKAGE_VERSION}] Response.type =`, response.type); console.log(`[callAi:${PACKAGE_VERSION}] Content-Type =`, contentType); } @@ -174,18 +139,12 @@ export function callAi( try { // Check if this is an invalid model error - const modelCheckResult = await checkForInvalidModelError( - clonedResponse, - model, - options.debug, - ); + const modelCheckResult = await checkForInvalidModelError(clonedResponse, model, options.debug); isInvalidModel = modelCheckResult.isInvalidModel; if (isInvalidModel) { if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Retrying with fallback model: ${FALLBACK_MODEL}`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Retrying with fallback model: ${FALLBACK_MODEL}`); } // Retry with fallback model return (await callAi(prompt, { @@ -194,10 +153,7 @@ export function callAi( })) as StreamResponse; } } catch (modelCheckError) { - console.error( - `[callAi:${PACKAGE_VERSION}] Error during model check:`, - modelCheckError, - ); + console.error(`[callAi:${PACKAGE_VERSION}] Error during model check:`, modelCheckError); // Continue with normal error handling } } @@ -221,11 +177,7 @@ export function callAi( let errorMessage = ""; // Handle common error formats - if ( - errorJson.error && - typeof errorJson.error === "object" && - errorJson.error.message - ) { + if (errorJson.error && typeof errorJson.error === "object" && errorJson.error.message) { // OpenRouter/OpenAI format: { error: { message: "..." 
} } errorMessage = errorJson.error.message; } else if (errorJson.error && typeof errorJson.error === "string") { @@ -245,28 +197,22 @@ export function callAi( } if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Extracted error message:`, - errorMessage, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Extracted error message:`, errorMessage); } // Create error with standard format - const error = new Error(errorMessage); - - // Add useful metadata - (error as any).status = response.status; - (error as any).statusText = response.statusText; - (error as any).details = errorJson; - (error as any).contentType = contentType; + const error = new CallAIError({ + message: errorMessage, + status: response.status, + statusText: response.statusText, + details: errorJson, + contentType, + }); throw error; } catch (jsonError) { // If JSON parsing fails, extract a useful message from the raw error body if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] JSON parse error:`, - jsonError, - ); + console.log(`[callAi:${PACKAGE_VERSION}] JSON parse error:`, jsonError); } // Try to extract a useful message even from non-JSON text @@ -275,10 +221,7 @@ export function callAi( // Check if it's a plain text error message if (errorBody && errorBody.trim().length > 0) { // Limit length for readability - errorMessage = - errorBody.length > 100 - ? errorBody.substring(0, 100) + "..." - : errorBody; + errorMessage = errorBody.length > 100 ? errorBody.substring(0, 100) + "..." : errorBody; } else { errorMessage = `API error: ${response.status} ${response.statusText}`; } @@ -289,17 +232,16 @@ export function callAi( } if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Extracted text error message:`, - errorMessage, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Extracted text error message:`, errorMessage); } - const error = new Error(errorMessage); - (error as any).status = response.status; - (error as any).statusText = response.statusText; - (error as any).details = errorBody; - (error as any).contentType = contentType; + const error = new CallAIError({ + message: errorMessage, + status: response.status, + statusText: response.statusText, + details: errorBody, + contentType, + }); throw error; } } catch (responseError) { @@ -309,26 +251,25 @@ export function callAi( } // Fallback error - const error = new Error( - `API returned ${response.status}: ${response.statusText}`, - ); - (error as any).status = response.status; - (error as any).statusText = response.statusText; - (error as any).contentType = contentType; + const error = new CallAIError({ + message: `API returned ${response.status}: ${response.statusText}`, + status: response.status, + statusText: response.statusText, + details: undefined, + contentType, + }); throw error; } } // Only if response is OK, create and return the streaming generator if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Response OK, creating streaming generator`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Response OK, creating streaming generator`); } return createStreamingGenerator(response, options, schemaStrategy, model); })(); // For backward compatibility with v0.6.x where users didn't await the result - if (process.env.NODE_ENV !== "production") { + if (callAiEnv.NODE_ENV !== "production") { if (options.debug) { console.warn( `[callAi:${PACKAGE_VERSION}] No await found - using legacy streaming pattern. 
This will be removed in a future version and may cause issues with certain models.`, @@ -337,7 +278,7 @@ export function callAi( } // Create a proxy object that acts both as a Promise and an AsyncGenerator for backward compatibility - // @ts-ignore - We're deliberately implementing a proxy with dual behavior + //... @ts-ignore - We're deliberately implementing a proxy with dual behavior return createBackwardCompatStreamingProxy(streamPromise); } @@ -345,10 +286,7 @@ export function callAi( * Buffer streaming results into a single response for cases where * we need to use streaming internally but the caller requested non-streaming */ -async function bufferStreamingResults( - prompt: string | Message[], - options: CallAIOptions, -): Promise { +async function bufferStreamingResults(prompt: string | Message[], options: CallAIOptions): Promise { // Create a copy of options with streaming enabled const streamingOptions = { ...options, @@ -357,10 +295,7 @@ async function bufferStreamingResults( try { // Get streaming generator - const generator = (await callAi( - prompt, - streamingOptions, - )) as AsyncGenerator; + const generator = (await callAi(prompt, streamingOptions)) as AsyncGenerator; // For Claude JSON responses, take only the last chunk (the final processed result) // For all other cases, concatenate chunks as before @@ -385,7 +320,7 @@ async function bufferStreamingResults( } } catch (error) { // Handle errors with standard API error handling - await handleApiError(error, "Buffered streaming", options.debug, { + await handleApiError(error as CallAIErrorParams, "Buffered streaming", options.debug, { apiKey: options.apiKey, endpoint: options.endpoint, skipRefresh: options.skipRefresh, @@ -428,10 +363,7 @@ function prepareRequestParams( schemaStrategy: SchemaStrategy; } { // First try to get the API key from options or window globals - let apiKey = - options.apiKey || - keyStore.current || // Try keyStore first in case it was refreshed in a previous call - (typeof window !== "undefined" ? (window as any).CALLAI_API_KEY : null); + const apiKey = options.apiKey || keyStore.current || callAiEnv.CALLAI_API_KEY; // Try keyStore first in case it was refreshed in a previous call const schema = options.schema || null; // If no API key exists, we won't throw immediately. We'll continue and let handleApiError @@ -442,34 +374,25 @@ function prepareRequestParams( const model = schemaStrategy.model; // Get custom chat API origin if set - const customChatOrigin = - options.chatUrl || - (typeof window !== "undefined" ? (window as any).CALLAI_CHAT_URL : null) || - (typeof process !== "undefined" && process.env - ? process.env.CALLAI_CHAT_URL - : null); + const customChatOrigin = options.chatUrl || callAiEnv.CALLAI_CHAT_URL; // Use custom origin or default OpenRouter URL const endpoint = options.endpoint || - (customChatOrigin - ? `${customChatOrigin}/api/v1/chat/completions` - : "https://openrouter.ai/api/v1/chat/completions"); + (customChatOrigin ? `${customChatOrigin}/api/v1/chat/completions` : "https://openrouter.ai/api/v1/chat/completions"); // Handle both string prompts and message arrays for backward compatibility - const messages: Message[] = Array.isArray(prompt) - ? prompt - : [{ role: "user", content: prompt }]; + const messages: Message[] = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt }]; // Common parameters for both streaming and non-streaming - const requestParams: any = { + const requestParams: CallAIOptions = { model, messages, stream: options.stream !== undefined ? 
options.stream : false, }; // Only include temperature if explicitly set - if (options.temperature !== undefined) { + if (options.temperature) { requestParams.temperature = options.temperature; } @@ -486,9 +409,7 @@ function prepareRequestParams( // Add optional parameters if specified if (options.stop) { // Handle both single string and array of stop sequences - requestParams.stop = Array.isArray(options.stop) - ? options.stop - : [options.stop]; + requestParams.stop = Array.isArray(options.stop) ? options.stop : [options.stop]; } // Add response_format parameter for models that support JSON output @@ -499,10 +420,7 @@ function prepareRequestParams( // Add schema structure if provided (for function calling/JSON mode) if (schema) { // Apply schema-specific parameters using the selected strategy - Object.assign( - requestParams, - schemaStrategy.prepareRequest(schema, messages), - ); + Object.assign(requestParams, schemaStrategy.prepareRequest(schema, messages)); } // HTTP headers for the request @@ -531,19 +449,14 @@ function prepareRequestParams( // If we don't have an API key, throw a clear error that can be caught and handled // by the error handling system to trigger key fetching if (!apiKey) { - throw new Error( - "API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY", - ); + throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY"); } // Debug logging for request payload if (options.debug) { console.log(`[callAi-prepareRequest:raw] Endpoint: ${endpoint}`); console.log(`[callAi-prepareRequest:raw] Model: ${model}`); - console.log( - `[callAi-prepareRequest:raw] Payload:`, - JSON.stringify(requestParams), - ); + console.log(`[callAi-prepareRequest:raw] Payload:`, JSON.stringify(requestParams)); } return { apiKey, model, endpoint, requestOptions, schemaStrategy }; @@ -552,11 +465,7 @@ function prepareRequestParams( /** * Internal implementation for non-streaming API calls */ -async function callAINonStreaming( - prompt: string | Message[], - options: CallAIOptions = {}, - isRetry: boolean = false, -): Promise { +async function callAINonStreaming(prompt: string | Message[], options: CallAIOptions = {}, isRetry = false): Promise { try { // Start timing for metadata const startTime = Date.now(); @@ -568,8 +477,7 @@ async function callAINonStreaming( startTime: startTime, }, }; - const { endpoint, requestOptions, model, schemaStrategy } = - prepareRequestParams(prompt, options); + const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, options); const response = await fetch(endpoint, requestOptions); @@ -577,26 +485,21 @@ async function callAINonStreaming( // Handle HTTP errors, with potential fallback for invalid model if (!response.ok || response.status >= 400) { - const { isInvalidModel } = await checkForInvalidModelError( - response, - model, - options.debug, - ); + const { isInvalidModel } = await checkForInvalidModelError(response, model, options.debug); if (isInvalidModel) { // Retry with fallback model - return callAINonStreaming( - prompt, - { ...options, model: FALLBACK_MODEL }, - true, - ); + return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true); } // Create a proper error object with the status code preserved - const error: any = new Error(`HTTP error! 
Status: ${response.status}`); - // Add status code as a property of the error object - error.status = response.status; - error.statusCode = response.status; // Add statusCode for compatibility with different error patterns + const error = new CallAIError({ + message: `HTTP error! Status: ${response.status}`, + status: response.status, + statusText: response.statusText, + details: undefined, + contentType: "text/plain", + }); throw error; } @@ -607,11 +510,7 @@ async function callAINonStreaming( try { result = await extractClaudeResponse(response); } catch (error) { - handleApiError( - error, - "Claude API response processing failed", - options.debug, - ); + handleApiError(error as CallAIErrorParams, "Claude API response processing failed", options.debug); } } else { result = await response.json(); @@ -619,10 +518,7 @@ async function callAINonStreaming( // Debug logging for raw API response if (options.debug) { - console.log( - `[callAi-nonStreaming:raw] Response:`, - JSON.stringify(result), - ); + console.log(`[callAi-nonStreaming:raw] Response:`, JSON.stringify(result)); } // Handle error responses @@ -640,11 +536,7 @@ async function callAINonStreaming( if (options.debug) { console.warn(`Model ${model} error, retrying with ${FALLBACK_MODEL}`); } - return callAINonStreaming( - prompt, - { ...options, model: FALLBACK_MODEL }, - true, - ); + return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true); } return JSON.stringify({ error: result.error, diff --git a/src/error-handling.ts b/call-ai/error-handling.ts similarity index 70% rename from src/error-handling.ts rename to call-ai/error-handling.ts index 77ec68f..14543db 100644 --- a/src/error-handling.ts +++ b/call-ai/error-handling.ts @@ -1,12 +1,8 @@ /** * Error handling utilities for call-ai */ -import { - keyStore, - globalDebug, - isNewKeyError, - refreshApiKey, -} from "./key-management"; +import { keyStore, globalDebug, isNewKeyError, refreshApiKey } from "./key-management.js"; +import { CallAIError, CallAIErrorParams } from "./types.js"; // Standardized API error handler // @param error The error object @@ -14,7 +10,7 @@ import { // @param debug Whether to log debug information // @param options Options for error handling including key refresh control async function handleApiError( - error: any, + ierror: unknown, context: string, debug: boolean = globalDebug, options: { @@ -25,14 +21,15 @@ async function handleApiError( updateRefreshToken?: (currentToken: string) => Promise; } = {}, ): Promise { + const error = ierror as CallAIErrorParams; + // Extract error details const errorMessage = error?.message || String(error); const status = error?.status || error?.statusCode || error?.response?.status || - (errorMessage.match(/status: (\d+)/i)?.[1] && - parseInt(errorMessage.match(/status: (\d+)/i)![1])); + (errorMessage.match(/status: (\d+)/i)?.[1] && parseInt(errorMessage.match(/status: (\d+)/i)?.[1] ?? 
"500")); // Check if this is a missing API key error const isMissingKeyError = errorMessage.includes("API key is required"); @@ -59,25 +56,18 @@ async function handleApiError( // If the error suggests an API key issue, try to refresh the key if (needsNewKey) { if (debug) { - console.log( - `[callAi:key-refresh] Error suggests API key issue, attempting refresh...`, - ); + console.log(`[callAi:key-refresh] Error suggests API key issue, attempting refresh...`); } try { // Use provided key/endpoint/refreshToken or fallback to global configuration const currentKey = options.apiKey || keyStore.current; const endpoint = options.endpoint || keyStore.refreshEndpoint; - let refreshToken = options.refreshToken || keyStore.refreshToken; + const refreshToken = options.refreshToken || keyStore.refreshToken; // First attempt to refresh the API key try { - const { apiKey, topup } = await refreshApiKey( - currentKey, - endpoint, - refreshToken, - debug, - ); + const { apiKey, topup } = await refreshApiKey(currentKey, endpoint, refreshToken, debug); // Update the key in the store (if not already set by refreshApiKey) if (keyStore.current !== apiKey) { @@ -85,9 +75,7 @@ async function handleApiError( } if (debug) { - console.log( - `[callAi:key-refresh] ${topup ? "Topped up" : "Refreshed"} API key successfully`, - ); + console.log(`[callAi:key-refresh] ${topup ? "Topped up" : "Refreshed"} API key successfully`); } // Return without throwing since we've successfully recovered @@ -96,33 +84,23 @@ async function handleApiError( // If there's an updateRefreshToken callback and the error was due to token issue if (options.updateRefreshToken && refreshToken) { if (debug) { - console.log( - `[callAi:key-refresh] Initial refresh failed, attempting to update refresh token`, - ); + console.log(`[callAi:key-refresh] Initial refresh failed, attempting to update refresh token`); } try { // Get a new refresh token using the callback - const newRefreshToken = - await options.updateRefreshToken(refreshToken); + const newRefreshToken = await options.updateRefreshToken(refreshToken); if (newRefreshToken && newRefreshToken !== refreshToken) { if (debug) { - console.log( - `[callAi:key-refresh] Got new refresh token, retrying key refresh`, - ); + console.log(`[callAi:key-refresh] Got new refresh token, retrying key refresh`); } // Update the stored refresh token keyStore.refreshToken = newRefreshToken; // Try again with the new token - const { apiKey, topup } = await refreshApiKey( - currentKey, - endpoint, - newRefreshToken, - debug, - ); + const { apiKey, topup } = await refreshApiKey(currentKey, endpoint, newRefreshToken, debug); // Update the key in the store if (keyStore.current !== apiKey) { @@ -139,19 +117,14 @@ async function handleApiError( return; } else { if (debug) { - console.log( - `[callAi:key-refresh] No new refresh token provided or same token returned, cannot retry`, - ); + console.log(`[callAi:key-refresh] No new refresh token provided or same token returned, cannot retry`); } // Continue to error handling throw initialRefreshError; } } catch (tokenUpdateError) { if (debug) { - console.error( - `[callAi:key-refresh] Failed to update refresh token:`, - tokenUpdateError, - ); + console.error(`[callAi:key-refresh] Failed to update refresh token:`, tokenUpdateError); } // Continue to error handling with the original refresh error throw initialRefreshError; @@ -164,30 +137,28 @@ async function handleApiError( } catch (refreshError) { // Log refresh failure but throw the original error if (debug) { - console.error( 
- `[callAi:key-refresh] API key refresh failed:`, - refreshError, - ); + console.error(`[callAi:key-refresh] API key refresh failed:`, refreshError); } // Create a more detailed error from the original one - const detailedError = new Error( - `${errorMessage} (Key refresh failed: ${refreshError instanceof Error ? refreshError.message : String(refreshError)})`, - ); - // Preserve error metadata from the original error - (detailedError as any).originalError = error; - (detailedError as any).refreshError = refreshError; - (detailedError as any).status = status || 401; + const detailedError = new CallAIError({ + message: `${errorMessage} (Key refresh failed: ${refreshError instanceof Error ? refreshError.message : String(refreshError)})`, + originalError: error, + refreshError, + status: status || 401, + contentType: "text/plain", + }); throw detailedError; } } // For non-key errors, create a detailed error object - const detailedError = new Error(`${context}: ${errorMessage}`); - (detailedError as any).originalError = error; - (detailedError as any).status = status || 500; - (detailedError as any).errorType = error?.name || "Error"; - + const detailedError = new CallAIError({ + message: `${context}: ${errorMessage}`, + originalError: error, + status: status || 500, + errorType: error.name || "Error", + }); throw detailedError; } @@ -196,7 +167,7 @@ async function checkForInvalidModelError( response: Response, model: string, debug: boolean = globalDebug, -): Promise<{ isInvalidModel: boolean; errorData?: any }> { +): Promise<{ isInvalidModel: boolean; errorData?: unknown }> { // Only check 4xx errors (which could indicate invalid model) if (response.status < 400 || response.status >= 500) { return { isInvalidModel: false }; @@ -209,12 +180,14 @@ async function checkForInvalidModelError( let errorData; try { errorData = await responseClone.json(); + // eslint-disable-next-line @typescript-eslint/no-unused-vars } catch (e) { // If it's not JSON, get the text try { const text = await responseClone.text(); errorData = { error: text }; - } catch (textError) { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + } catch (e) { errorData = { error: `Error ${response.status}: ${response.statusText}` }; } } @@ -241,10 +214,7 @@ async function checkForInvalidModelError( errorData.error.message.toLowerCase().includes("unavailable"))))); if (debug && isInvalidModelError) { - console.log( - `[callAi:model-fallback] Detected invalid model error for "${model}":`, - errorData, - ); + console.log(`[callAi:model-fallback] Detected invalid model error for "${model}":`, errorData); } return { isInvalidModel: isInvalidModelError, errorData }; diff --git a/src/image.ts b/call-ai/image.ts similarity index 67% rename from src/image.ts rename to call-ai/image.ts index 47e44b2..5349abf 100644 --- a/src/image.ts +++ b/call-ai/image.ts @@ -2,11 +2,11 @@ * Image generation API implementation for call-ai * Integration with custom image generation API */ -import { ImageGenOptions, ImageResponse } from "./types"; +import { ImageGenOptions, ImageResponse } from "./types.js"; +import { callAiEnv } from "./utils.js"; +import { PACKAGE_VERSION } from "./version.js"; // Import package version for debugging (same as main API) -// eslint-disable-next-line @typescript-eslint/no-var-requires -const PACKAGE_VERSION = require("../package.json").version; /** * Generate images using a custom API that mimics OpenAI's image generation capabilities @@ -14,40 +14,23 @@ const PACKAGE_VERSION = require("../package.json").version; * 
@param options Configuration options for the image generation request * @returns A Promise that resolves to the image response containing base64 encoded image data */ -export async function imageGen( - prompt: string, - options: ImageGenOptions = {}, -): Promise { - const { - model = "gpt-image-1", - apiKey = "VIBES_DIY", - debug = false, - size = "1024x1024", - } = options; +export async function imageGen(prompt: string, options: ImageGenOptions = {}): Promise { + const { model = "gpt-image-1", apiKey = "VIBES_DIY", debug = false, size = "1024x1024" } = options; if (debug) { - console.log( - `[imageGen:${PACKAGE_VERSION}] Generating image with prompt: ${prompt.substring(0, 50)}...`, - ); + console.log(`[imageGen:${PACKAGE_VERSION}] Generating image with prompt: ${prompt.substring(0, 50)}...`); console.log(`[imageGen:${PACKAGE_VERSION}] Using model: ${model}`); } // Get custom origin if set - const customOrigin = - options.imgUrl || - (typeof window !== "undefined" ? (window as any).CALLAI_IMG_URL : null) || - (typeof process !== "undefined" && process.env - ? process.env.CALLAI_IMG_URL - : null); + const customOrigin = options.imgUrl || callAiEnv.CALLAI_IMG_URL; try { // Handle image generation if (!options.images || options.images.length === 0) { // Simple image generation with text prompt // Use custom origin or document.location.origin - const origin = - customOrigin || - (typeof document !== "undefined" ? document.location.origin : ""); + const origin = customOrigin || (typeof document !== "undefined" ? document.location.origin : ""); const generateEndpoint = `${origin}/api/openai-image/generate`; const response = await fetch(generateEndpoint, { @@ -67,9 +50,7 @@ export async function imageGen( if (!response.ok) { const errorData = await response.text(); - throw new Error( - `Image generation failed: ${response.status} ${response.statusText} - ${errorData}`, - ); + throw new Error(`Image generation failed: ${response.status} ${response.statusText} - ${errorData}`); } const result = await response.json(); @@ -91,9 +72,7 @@ export async function imageGen( if (options.style) formData.append("style", options.style); // Use custom origin or document.location.origin - const origin = - customOrigin || - (typeof document !== "undefined" ? document.location.origin : ""); + const origin = customOrigin || (typeof document !== "undefined" ? 
document.location.origin : ""); const editEndpoint = `${origin}/api/openai-image/edit`; const response = await fetch(editEndpoint, { @@ -106,9 +85,7 @@ export async function imageGen( if (!response.ok) { const errorData = await response.text(); - throw new Error( - `Image editing failed: ${response.status} ${response.statusText} - ${errorData}`, - ); + throw new Error(`Image editing failed: ${response.status} ${response.statusText} - ${errorData}`); } const result = await response.json(); diff --git a/call-ai/index.ts b/call-ai/index.ts new file mode 100644 index 0000000..8a7de4a --- /dev/null +++ b/call-ai/index.ts @@ -0,0 +1,18 @@ +/** + * call-ai: A lightweight library for making AI API calls + */ + +// Export public types +export * from "./types.js"; + +// Export API functions +export { callAi } from "./api.js"; +// Backward compatibility for callAI (uppercase AI) +export { callAi as callAI } from "./api.js"; + +export { getMeta } from "./response-metadata.js"; + +// Export image generation function +export { imageGen } from "./image.js"; + +export { callAiEnv } from "./utils.js"; diff --git a/src/index.ts.bak b/call-ai/index.ts.bak similarity index 100% rename from src/index.ts.bak rename to call-ai/index.ts.bak diff --git a/src/key-management.ts b/call-ai/key-management.ts similarity index 65% rename from src/key-management.ts rename to call-ai/key-management.ts index 6821e34..45b2a6e 100644 --- a/src/key-management.ts +++ b/call-ai/key-management.ts @@ -2,20 +2,33 @@ * Key management functionality for call-ai */ +import { entriesHeaders } from "../test/test-helper.js"; +import { CallAIErrorParams, Falsy } from "./types.js"; +import { callAiEnv } from "./utils.js"; + +export interface KeyMetadata { + key: string; + hash: string; + created: Date; + expires: Date; + remaining: number; + limit: number; +} + // Internal key store to keep track of the latest key const keyStore = { // Default key from environment or config - current: null as string | null, + current: undefined as string | undefined, // The refresh endpoint URL - defaults to vibecode.garden - refreshEndpoint: "https://vibecode.garden" as string | null, + refreshEndpoint: "https://vibecode.garden", // Authentication token for refresh endpoint - defaults to use-vibes - refreshToken: "use-vibes" as string | null, + refreshToken: "use-vibes" as string | Falsy, // Flag to prevent concurrent refresh attempts isRefreshing: false, // Timestamp of last refresh attempt (to prevent too frequent refreshes) lastRefreshAttempt: 0, // Storage for key metadata (useful for future top-up implementation) - metadata: {} as Record, + metadata: {} as Record>, }; // Global debug flag @@ -26,55 +39,10 @@ let globalDebug = false; */ function initKeyStore() { // Initialize with environment variables if available - if (typeof process !== "undefined" && process.env) { - if (process.env.CALLAI_API_KEY) { - keyStore.current = process.env.CALLAI_API_KEY; - } - - // Support both CALLAI_REFRESH_ENDPOINT and CALLAI_REKEY_ENDPOINT for backward compatibility - if (process.env.CALLAI_REFRESH_ENDPOINT) { - keyStore.refreshEndpoint = process.env.CALLAI_REFRESH_ENDPOINT; - } else if (process.env.CALLAI_REKEY_ENDPOINT) { - keyStore.refreshEndpoint = process.env.CALLAI_REKEY_ENDPOINT; - } else { - // Default to vibecode.garden if not specified - keyStore.refreshEndpoint = "https://vibecode.garden"; - } - - // Support both CALL_AI_REFRESH_TOKEN and CALL_AI_KEY_TOKEN for backward compatibility - if (process.env.CALL_AI_REFRESH_TOKEN) { - keyStore.refreshToken = 
process.env.CALL_AI_REFRESH_TOKEN; - } else if (process.env.CALL_AI_KEY_TOKEN) { - keyStore.refreshToken = process.env.CALL_AI_KEY_TOKEN; - } else { - // Default to use-vibes if not specified - this is the default token for vibecode.garden - keyStore.refreshToken = "use-vibes"; - } - - // Check for CALLAI_DEBUG environment variable (any truthy value works) - if (process.env.CALLAI_DEBUG) { - // Set the global debug flag - globalDebug = true; - } - } - // Initialize from window globals if in browser context - else if (typeof window !== "undefined") { - // Use window.CALLAI_API_KEY or window.callAi.API_KEY if available - if ((window as any).CALLAI_API_KEY) { - keyStore.current = (window as any).CALLAI_API_KEY; - } else if ((window as any).callAi?.API_KEY) { - keyStore.current = (window as any).callAi.API_KEY; - } - - // Check for debug flag in browser environment - if ((window as any).CALLAI_DEBUG) { - globalDebug = true; - } - keyStore.refreshEndpoint = - (window as any).CALLAI_REFRESH_ENDPOINT || keyStore.refreshEndpoint; - keyStore.refreshToken = - (window as any).CALL_AI_REFRESH_TOKEN || keyStore.refreshToken; - } + keyStore.current = callAiEnv.CALLAI_API_KEY; + keyStore.refreshEndpoint = callAiEnv.CALLAI_REFRESH_ENDPOINT ?? "https://vibecode.garden"; + keyStore.refreshToken = callAiEnv.CALL_AI_REFRESH_TOKEN ?? "use-vibes"; + globalDebug = !!callAiEnv.CALLAI_DEBUG; } // Initialize on module load @@ -86,9 +54,10 @@ initKeyStore(); * @param debug Whether to log debug information * @returns True if the error suggests we need a new key */ -function isNewKeyError(error: any, debug: boolean = false): boolean { +function isNewKeyError(ierror: unknown, debug = false): boolean { + const error = ierror as CallAIErrorParams; // Extract status from error object or message text - let status = error?.status || error?.statusCode || error?.response?.status; + let status = error?.status || error?.statusCode || error?.response?.status || 450; const errorMessage = String(error || "").toLowerCase(); // Extract status code from error message if not found in the object properties @@ -124,9 +93,7 @@ function isNewKeyError(error: any, debug: boolean = false): boolean { // Check for OpenAI specific error patterns const isOpenAIKeyError = - errorMessage.includes("openai") && - (errorMessage.includes("api key") || - errorMessage.includes("authentication")); + errorMessage.includes("openai") && (errorMessage.includes("api key") || errorMessage.includes("authentication")); // Check for rate limit errors which might indicate a key top-up is needed const isRateLimitError = @@ -144,18 +111,10 @@ function isNewKeyError(error: any, debug: boolean = false): boolean { errorMessage.includes("account"); // Simple heuristic: if it's a 4xx error with any key-related terms, likely needs key refresh - const needsNewKey = - is4xx && - (isAuthError || - isInvalidKeyError || - isOpenAIKeyError || - isRateLimitError || - isBillingError); + const needsNewKey = is4xx && (isAuthError || isInvalidKeyError || isOpenAIKeyError || isRateLimitError || isBillingError); if (debug && needsNewKey) { - console.log( - `[callAi:key-refresh] Detected error requiring key refresh: ${errorMessage}`, - ); + console.log(`[callAi:key-refresh] Detected error requiring key refresh: ${errorMessage}`); } return needsNewKey; @@ -169,9 +128,9 @@ function isNewKeyError(error: any, debug: boolean = false): boolean { * @returns Object containing the API key and topup flag */ async function refreshApiKey( - currentKey: string | null, - endpoint: string | null, - 
refreshToken: string | null, + currentKey: string | Falsy, + endpoint: string | Falsy, + refreshToken: string | Falsy, debug: boolean = globalDebug, ): Promise<{ apiKey: string; topup: boolean }> { // Ensure we have an endpoint and refreshToken @@ -206,14 +165,10 @@ async function refreshApiKey( if (timeSinceLastRefresh < minRefreshInterval) { if (debug) { - console.log( - `Rate limiting key refresh, last attempt was ${timeSinceLastRefresh}ms ago`, - ); + console.log(`Rate limiting key refresh, last attempt was ${timeSinceLastRefresh}ms ago`); } // If we've refreshed too recently, wait a bit - await new Promise((resolve) => - setTimeout(resolve, minRefreshInterval - timeSinceLastRefresh), - ); + await new Promise((resolve) => setTimeout(resolve, minRefreshInterval - timeSinceLastRefresh)); } // Set refreshing flag and update last attempt timestamp @@ -221,7 +176,7 @@ async function refreshApiKey( keyStore.lastRefreshAttempt = Date.now(); // Process API paths - let apiPath = "/api/keys"; + const apiPath = "/api/keys"; // Normalize endpoint URL to remove any trailing slashes const baseUrl = endpoint.endsWith("/") ? endpoint.slice(0, -1) : endpoint; @@ -261,13 +216,9 @@ async function refreshApiKey( }); if (debug) { - console.log( - `[callAi:key-refresh] Response status: ${response.status} ${response.statusText}`, - ); - console.log( - `[callAi:key-refresh] Response headers:`, - Object.fromEntries([...response.headers.entries()]), - ); + console.log(`[callAi:key-refresh] Response status: ${response.status} ${response.statusText}`); + + console.log(`[callAi:key-refresh] Response headers:`, Object.fromEntries([...entriesHeaders(response.headers)])); } if (!response.ok) { @@ -276,9 +227,7 @@ async function refreshApiKey( if (debug) { console.log(`[callAi:key-refresh] Error response body: ${errorText}`); } - throw new Error( - `API key refresh failed: ${response.status} ${response.statusText}${errorText ? ` - ${errorText}` : ""}`, - ); + throw new Error(`API key refresh failed: ${response.status} ${response.statusText}${errorText ? 
` - ${errorText}` : ""}`); } // Parse the response @@ -286,10 +235,7 @@ async function refreshApiKey( // Log the complete response structure for debugging if (debug) { - console.log( - `[callAi:key-refresh] Full response structure:`, - JSON.stringify(data, null, 2), - ); + console.log(`[callAi:key-refresh] Full response structure:`, JSON.stringify(data, null, 2)); } // Handle different API response formats @@ -305,22 +251,15 @@ async function refreshApiKey( } // Handle error case else { - throw new Error( - "Invalid response from key refresh endpoint: missing or malformed key", - ); + throw new Error("Invalid response from key refresh endpoint: missing or malformed key"); } if (debug) { - console.log( - `API key refreshed successfully: ${newKey.substring(0, 10)}...`, - ); + console.log(`API key refreshed successfully: ${newKey.substring(0, 10)}...`); } // Store metadata for potential future use (like top-up) - if ( - data.metadata || - (data.key && typeof data.key === "object" && data.key.metadata) - ) { + if (data.metadata || (data.key && typeof data.key === "object" && data.key.metadata)) { const metadata = data.metadata || data.key.metadata; storeKeyMetadata(metadata); } @@ -330,10 +269,8 @@ async function refreshApiKey( // Determine if this was a top-up (using existing key) or new key // For the new API response format, hash is in data.key.hash - const hashValue = - data.hash || (data.key && typeof data.key === "object" && data.key.hash); - const isTopup = - currentKey && hashValue && hashValue === getHashFromKey(currentKey); + const hashValue = data.hash || (data.key && typeof data.key === "object" && data.key.hash); + const isTopup = currentKey && hashValue && hashValue === getHashFromKey(currentKey); // Reset refreshing flag keyStore.isRefreshing = false; @@ -362,25 +299,17 @@ function getHashFromKey(key: string): string | null { /** * Helper function to store key metadata for future reference */ -function storeKeyMetadata(data: any): void { +function storeKeyMetadata(data: KeyMetadata): void { if (!data || !data.key) return; // Store metadata with the key as the dictionary key keyStore.metadata[data.key] = { - hash: data.hash || null, + hash: data.hash, created: data.created || Date.now(), - expires: data.expires || null, - remaining: data.remaining || null, - limit: data.limit || null, + expires: data.expires, + remaining: data.remaining, + limit: data.limit, }; } -export { - keyStore, - globalDebug, - initKeyStore, - isNewKeyError, - refreshApiKey, - getHashFromKey, - storeKeyMetadata, -}; +export { keyStore, globalDebug, initKeyStore, isNewKeyError, refreshApiKey, getHashFromKey, storeKeyMetadata }; diff --git a/src/non-streaming.ts b/call-ai/non-streaming.ts similarity index 76% rename from src/non-streaming.ts rename to call-ai/non-streaming.ts index 2013c6c..80b87d1 100644 --- a/src/non-streaming.ts +++ b/call-ai/non-streaming.ts @@ -1,30 +1,22 @@ /** * Non-streaming API call implementation for call-ai */ -import { CallAIOptions, Message, SchemaStrategy } from "./types"; -import { globalDebug, keyStore } from "./key-management"; -import { handleApiError, checkForInvalidModelError } from "./error-handling"; -import { responseMetadata, boxString } from "./response-metadata"; +import { AIResult, CallAIErrorParams, CallAIOptions, Message, SchemaAIMessageRequest, SchemaStrategy } from "./types.js"; +import { globalDebug, keyStore, initKeyStore } from "./key-management.js"; +import { handleApiError, checkForInvalidModelError } from "./error-handling.js"; +import { responseMetadata, 
boxString } from "./response-metadata.js"; +import { PACKAGE_VERSION } from "./version.js"; // Import package version for debugging -// eslint-disable-next-line @typescript-eslint/no-var-requires -const PACKAGE_VERSION = require("../package.json").version; const FALLBACK_MODEL = "openrouter/auto"; // Internal implementation for non-streaming API calls -async function callAINonStreaming( - prompt: string | Message[], - options: CallAIOptions = {}, - isRetry: boolean = false, -): Promise<string> { +async function callAINonStreaming(prompt: string | Message[], options: CallAIOptions = {}, isRetry = false): Promise<string> { // Ensure keyStore is initialized first - const { initKeyStore } = require("./key-management"); initKeyStore(); // Convert simple string prompts to message array format - const messages = Array.isArray(prompt) - ? prompt - : [{ role: "user", content: prompt }]; + const messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt } satisfies Message]; // API key should be provided by options (validation happens in callAi) const apiKey = options.apiKey; @@ -38,24 +30,22 @@ async function callAINonStreaming( // Choose a schema strategy based on model const schemaStrategy = options.schemaStrategy; + if (!schemaStrategy) { + throw new Error("Schema strategy is required for non-streaming calls"); + } // Default to JSON response for certain models - const responseFormat = - options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model) - ? "json" - : undefined; + const responseFormat = options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model) ? "json" : undefined; const debug = options.debug === undefined ? globalDebug : options.debug; if (debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Making non-streaming request to: ${url}`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Making non-streaming request to: ${url}`); console.log(`[callAi:${PACKAGE_VERSION}] With model: ${model}`); } // Build request body - const requestBody: any = { + const requestBody: SchemaAIMessageRequest = { model, messages, max_tokens: options.maxTokens || 2048, @@ -71,10 +61,7 @@ async function callAINonStreaming( // Add schema-specific parameters (if schema is provided) if (options.schema) { - Object.assign( - requestBody, - schemaStrategy.prepareRequest(options.schema, messages), - ); + Object.assign(requestBody, schemaStrategy.prepareRequest(options.schema, messages)); } // Add HTTP referer and other options to help with abuse prevention @@ -110,7 +97,7 @@ async function callAINonStreaming( "debug", ].includes(key) ) { - requestBody[key] = (options as any)[key]; + requestBody[key] = options[key]; } }); @@ -141,17 +128,11 @@ async function callAINonStreaming( // Handle HTTP errors if (!response.ok) { // Check if this is an invalid model error that we can handle with a fallback - const { isInvalidModel, errorData } = await checkForInvalidModelError( - response, - model, - debug, - ); + const { isInvalidModel, errorData } = await checkForInvalidModelError(response, model, debug); if (isInvalidModel && !isRetry && !options.skipRetry) { if (debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Invalid model "${model}", falling back to "${FALLBACK_MODEL}"`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Invalid model "${model}", falling back to "${FALLBACK_MODEL}"`); } // Retry with the fallback model @@ -166,9 +147,7 @@ } // For other errors, throw with details - const errorText = errorData - ? JSON.stringify(errorData) - : `HTTP error! Status: ${response.status}`; + const errorText = errorData ? JSON.stringify(errorData) : `HTTP error! Status: ${response.status}`; throw new Error(errorText); }
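// Note on the fallback above: the guard `isInvalidModel && !isRetry && !options.skipRetry` allows at most one fallback attempt, because the fallback request is issued as a retry; an invalid FALLBACK_MODEL therefore surfaces as a normal error rather than looping.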
@@ -183,9 +162,7 @@ result = extractContent(json, schemaStrategy); } } catch (parseError) { - throw new Error( - `Failed to parse API response: ${parseError instanceof Error ? parseError.message : String(parseError)}`, - ); + throw new Error(`Failed to parse API response: ${parseError instanceof Error ? parseError.message : String(parseError)}`); } // Update metadata with completion timing @@ -194,8 +171,7 @@ meta.timing.duration = endTime - meta.timing.startTime; // Store metadata for this response - const resultString = - typeof result === "string" ? result : JSON.stringify(result); + const resultString = typeof result === "string" ? result : JSON.stringify(result); // Box the string for WeakMap storage const boxed = boxString(resultString); @@ -204,23 +180,18 @@ return resultString; } catch (error) { // Check if this is a network/fetch error - const isNetworkError = - error instanceof Error && - (error.message.includes("Network") || error.name === "TypeError"); + const isNetworkError = error instanceof Error && (error.message.includes("Network") || error.name === "TypeError"); if (isNetworkError) { // Direct re-throw for network errors (original implementation pattern) if (debug) { - console.error( - `[callAi:${PACKAGE_VERSION}] Network error during fetch:`, - error, - ); + console.error(`[callAi:${PACKAGE_VERSION}] Network error during fetch:`, error); } throw error; } // For other errors, use API error handling - await handleApiError(error, "Non-streaming API call", options.debug, { + await handleApiError(error as CallAIErrorParams, "Non-streaming API call", options.debug, { apiKey: apiKey || undefined, endpoint: options.endpoint || undefined, skipRefresh: options.skipRefresh, @@ -229,9 +200,7 @@ // If handleApiError refreshed the key, we want to retry with the new key if (keyStore.current && keyStore.current !== apiKey) { if (debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Retrying with refreshed API key`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Retrying with refreshed API key`); } // Retry the request with the new key @@ -252,7 +221,7 @@ } // Extract content from API response accounting for different formats -function extractContent(result: any, schemaStrategy: SchemaStrategy): any { +function extractContent(result: AIResult, schemaStrategy: SchemaStrategy): string { // Debug output has been removed for brevity if (!result) { @@ -307,13 +276,16 @@ return schemaStrategy.processResponse(choice.text); } } + if (typeof result !== "string") { + throw new Error(`Failed to extract content from API response: ${JSON.stringify(result)}`); + } // Return raw result if we couldn't extract content return result; } // Extract response from Claude API with timeout handling -async function extractClaudeResponse(response: Response): Promise<any> { +async function extractClaudeResponse(response: Response): Promise<Partial<AIResult>> { try { const timeoutPromise = new Promise((_, reject) => { setTimeout(() => { @@ -326,28 +298,15 @@ // Race between timeout and response const json = await Promise.race([responsePromise, timeoutPromise]); - 
if ( - json.choices && - json.choices.length > 0 && - json.choices[0].message && - json.choices[0].message.content - ) { + if (json.choices && json.choices.length > 0 && json.choices[0].message && json.choices[0].message.content) { return json.choices[0].message.content; } // If content not found in expected structure, return the whole JSON return json; } catch (error) { - throw new Error( - `Failed to extract Claude response: ${error instanceof Error ? error.message : String(error)}`, - ); + throw new Error(`Failed to extract Claude response: ${error instanceof Error ? error.message : String(error)}`); } } -export { - callAINonStreaming, - extractContent, - extractClaudeResponse, - PACKAGE_VERSION, - FALLBACK_MODEL, -}; +export { callAINonStreaming, extractContent, extractClaudeResponse, PACKAGE_VERSION, FALLBACK_MODEL }; diff --git a/call-ai/package.json b/call-ai/package.json new file mode 100644 index 0000000..c6d9115 --- /dev/null +++ b/call-ai/package.json @@ -0,0 +1,37 @@ +{ + "name": "call-ai", + "version": "0.0.0", + "description": "Lightweight library for making AI API calls with streaming support", + "repository": { + "type": "git", + "url": "https://github.com/fireproof-storage/call-ai.git" + }, + "homepage": "https://github.com/fireproof-storage/call-ai", + "bugs": { + "url": "https://github.com/fireproof-storage/call-ai/issues" + }, + "scripts": { + "build": "tsc" + }, + "keywords": [ + "ai", + "llm", + "api", + "call", + "openai", + "streaming", + "openrouter" + ], + "contributors": [ + "J Chris Anderson", + "Meno Abels" + ], + "license": "Apache-2.0", + "devDependencies": { + "@types/node": "^24.0.15", + "typescript": "^5.8.3" + }, + "engines": { + "node": ">=20.0.0" + } +} diff --git a/src/response-metadata.ts b/call-ai/response-metadata.ts similarity index 85% rename from src/response-metadata.ts rename to call-ai/response-metadata.ts index a080d09..a032fd7 100644 --- a/src/response-metadata.ts +++ b/call-ai/response-metadata.ts @@ -2,7 +2,7 @@ * Response metadata handling for call-ai */ -import { ResponseMeta } from "./types"; +import { ResponseMeta } from "./types.js"; // WeakMap to store metadata for responses without modifying the response objects const responseMetadata = new WeakMap(); @@ -17,7 +17,7 @@ const stringResponseMap = new Map(); function boxString(str: string): object { // Check if already boxed if (stringResponseMap.has(str)) { - return stringResponseMap.get(str)!; + return stringResponseMap.get(str) as object; } // Create a new box const box = Object.create(null); @@ -30,9 +30,7 @@ function boxString(str: string): object { * @param response A response from callAi, either string or AsyncGenerator * @returns The metadata object if available, undefined otherwise */ -function getMeta( - response: string | AsyncGenerator, -): ResponseMeta | undefined { +function getMeta(response: string | AsyncGenerator): ResponseMeta | undefined { if (typeof response === "string") { const box = stringResponseMap.get(response); if (box) { diff --git a/call-ai/strategies/index.ts b/call-ai/strategies/index.ts new file mode 100644 index 0000000..7c227f1 --- /dev/null +++ b/call-ai/strategies/index.ts @@ -0,0 +1,5 @@ +/** + * Strategy exports + */ +export * from "./model-strategies.js"; +export * from "./strategy-selector.js"; diff --git a/src/strategies/model-strategies.ts b/call-ai/strategies/model-strategies.ts similarity index 69% rename from src/strategies/model-strategies.ts rename to call-ai/strategies/model-strategies.ts index 38496ae..808523d 100644 --- 
a/src/strategies/model-strategies.ts +++ b/call-ai/strategies/model-strategies.ts @@ -1,8 +1,17 @@ /** * Model strategies for different AI models */ -import { Message, ModelStrategy } from "../types"; -import { recursivelyAddAdditionalProperties } from "../utils"; +import { + isToolUseType, + Message, + ModelStrategy, + OpenAIFunctionCall, + ProcessedSchema, + SchemaAIJsonSchemaRequest, + SchemaDescription, + SchemaType, +} from "../types.js"; +import { recursivelyAddAdditionalProperties } from "../utils.js"; /** * OpenAI/GPT strategy for handling JSON schema @@ -10,31 +19,19 @@ import { recursivelyAddAdditionalProperties } from "../utils"; export const openAIStrategy: ModelStrategy = { name: "openai", prepareRequest: (schema) => { - if (!schema) return {}; + if (!schema) throw new Error("Schema strategy not implemented"); // Process schema for JSON schema approach - const requiredFields = - schema.required || Object.keys(schema.properties || {}); + const requiredFields = schema.required || Object.keys(schema.properties || {}); const processedSchema = recursivelyAddAdditionalProperties({ type: "object", properties: schema.properties || {}, required: requiredFields, - additionalProperties: - schema.additionalProperties !== undefined - ? schema.additionalProperties - : false, + additionalProperties: schema.additionalProperties !== undefined ? schema.additionalProperties : false, // Copy any additional schema properties ...Object.fromEntries( - Object.entries(schema).filter( - ([key]) => - ![ - "name", - "properties", - "required", - "additionalProperties", - ].includes(key), - ), + Object.entries(schema).filter(([key]) => !["name", "properties", "required", "additionalProperties"].includes(key)), ), }); @@ -47,7 +44,7 @@ export const openAIStrategy: ModelStrategy = { schema: processedSchema, }, }, - }; + } satisfies SchemaAIJsonSchemaRequest; }, processResponse: (content) => { if (typeof content !== "string") { @@ -84,18 +81,15 @@ export const claudeStrategy: ModelStrategy = { name: "anthropic", shouldForceStream: true, prepareRequest: (schema) => { - if (!schema) return {}; + if (!schema) throw new Error("Schema strategy not implemented"); // Process schema for tool use - format for OpenRouter/Claude const processedSchema = { type: "object", properties: schema.properties || {}, required: schema.required || Object.keys(schema.properties || {}), - additionalProperties: - schema.additionalProperties !== undefined - ? schema.additionalProperties - : false, - }; + additionalProperties: schema.additionalProperties !== undefined ? 
schema.additionalProperties : false, + } satisfies ProcessedSchema; return { tools: [ @@ -113,31 +107,28 @@ function: { name: schema.name || "generate_structured_data", }, - }, + } satisfies OpenAIFunctionCall, }; }, processResponse: (content) => { // Handle tool use response - if (typeof content === "object") { + if (isToolUseType(content)) { if (content.type === "tool_use") { return JSON.stringify(content.input); } // Handle newer tool_calls format - if ( - content.tool_calls && - Array.isArray(content.tool_calls) && - content.tool_calls.length > 0 - ) { + if (content.tool_calls && Array.isArray(content.tool_calls) && content.tool_calls.length > 0) { const toolCall = content.tool_calls[0]; if (toolCall.function && toolCall.function.arguments) { - try { - // Try to parse as JSON first - return toolCall.function.arguments; - } catch (e) { - // Return as is if not valid JSON - return JSON.stringify(toolCall.function.arguments); - } + // @jchris: I don't get these lines. The try/catch below could never reach its catch branch (a plain return does not throw), so it is reduced to the stringify call: + // try { + // // Try to parse as JSON first + // return toolCall.function.arguments; + // } catch (e) { + // // Return as is if not valid JSON + return JSON.stringify(toolCall.function.arguments); + // } } } @@ -149,11 +140,10 @@ } // Try to extract JSON from content if it might be wrapped - const jsonMatch = content.match(/```json\s*([\s\S]*?)\s*```/) || - content.match(/```\s*([\s\S]*?)\s*```/) || - content.match(/\{[\s\S]*\}/) || [null, content]; + const jsonMatch = + content.match(/```json\s*([\s\S]*?)\s*```/) || content.match(/```\s*([\s\S]*?)\s*```/) || content.match(/\{[\s\S]*\}/); // || [null, content]; - return jsonMatch[1] || content; + return jsonMatch ? jsonMatch[1] || content : content; }, }; @@ -172,10 +162,8 @@ ... systemMessageStrategy ... const schemaProperties = Object.entries(schema.properties || {}) .map(([key, value]) => { - const type = (value as any).type || "string"; - const description = (value as any).description - ? ` // ${(value as any).description}` - : ""; + const type = (value as SchemaType).type || "string"; + const description = (value as SchemaDescription).description ? ` // ${(value as SchemaDescription).description}` : ""; return ` "${key}": ${type}${description}`; }) .join(",\n"); @@ -210,7 +198,8 @@ */ export const defaultStrategy: ModelStrategy = { name: "default", - prepareRequest: () => ({}), - processResponse: (content) => - typeof content === "string" ? content : JSON.stringify(content), + prepareRequest: () => { + throw new Error("Schema strategy not implemented"); + }, + processResponse: (content) => (typeof content === "string" ? 
content : JSON.stringify(content)), }; diff --git a/src/strategies/strategy-selector.ts b/call-ai/strategies/strategy-selector.ts similarity index 89% rename from src/strategies/strategy-selector.ts rename to call-ai/strategies/strategy-selector.ts index a2e23a4..bc35f75 100644 --- a/src/strategies/strategy-selector.ts +++ b/call-ai/strategies/strategy-selector.ts @@ -1,22 +1,13 @@ /** * Strategy selection logic for different AI models */ -import { Schema, SchemaStrategy } from "../types"; -import { - claudeStrategy, - defaultStrategy, - geminiStrategy, - openAIStrategy, - systemMessageStrategy, -} from "./model-strategies"; +import { Schema, SchemaStrategy } from "../types.js"; +import { claudeStrategy, defaultStrategy, geminiStrategy, openAIStrategy, systemMessageStrategy } from "./model-strategies.js"; /** * Choose the appropriate schema strategy based on model and schema */ -export function chooseSchemaStrategy( - model: string | undefined, - schema: Schema | null, -): SchemaStrategy { +export function chooseSchemaStrategy(model: string | undefined, schema: Schema | null): SchemaStrategy { // Default model if not provided const resolvedModel = model || (schema ? "openai/gpt-4o" : "openrouter/auto"); diff --git a/src/streaming.ts b/call-ai/streaming.ts similarity index 74% rename from src/streaming.ts rename to call-ai/streaming.ts index cbd7458..a1087ad 100644 --- a/src/streaming.ts +++ b/call-ai/streaming.ts @@ -2,11 +2,11 @@ * Streaming response handling for call-ai */ -import { CallAIOptions, SchemaStrategy } from "./types"; -import { globalDebug } from "./key-management"; -import { responseMetadata, boxString } from "./response-metadata"; -import { checkForInvalidModelError } from "./error-handling"; -import { PACKAGE_VERSION, FALLBACK_MODEL } from "./non-streaming"; +import { CallAIError, CallAIOptions, Message, ResponseMeta, SchemaAIMessageRequest, SchemaStrategy, ToolUseType } from "./types.js"; +import { globalDebug } from "./key-management.js"; +import { responseMetadata, boxString } from "./response-metadata.js"; +import { checkForInvalidModelError } from "./error-handling.js"; +import { PACKAGE_VERSION, FALLBACK_MODEL } from "./non-streaming.js"; // Generator factory function for streaming API calls // This is called after the fetch is made and response is validated @@ -21,7 +21,7 @@ async function* createStreamingGenerator( model: string, ): AsyncGenerator { // Create a metadata object for this streaming response - const meta = { + const meta: ResponseMeta = { model, endpoint: options.endpoint || "https://openrouter.ai/api/v1", timing: { @@ -37,18 +37,14 @@ async function* createStreamingGenerator( let chunkCount = 0; if (options.debug || globalDebug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Starting streaming generator with model: ${model}`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Starting streaming generator with model: ${model}`); } try { // Handle streaming response const reader = response.body?.getReader(); if (!reader) { - throw new Error( - "Response body is undefined - API endpoint may not support streaming", - ); + throw new Error("Response body is undefined - API endpoint may not support streaming"); } const textDecoder = new TextDecoder(); @@ -58,9 +54,7 @@ async function* createStreamingGenerator( const { done, value } = await reader.read(); if (done) { if (options.debug || globalDebug) { - console.log( - `[callAi-streaming:complete v${PACKAGE_VERSION}] Stream finished after ${chunkCount} chunks`, - ); + console.log(`[callAi-streaming:complete 
v${PACKAGE_VERSION}] Stream finished after ${chunkCount} chunks`); } break; } @@ -70,7 +64,7 @@ buffer += chunk; // Split on double newlines to find complete SSE messages - let messages = buffer.split(/\n\n/); + const messages = buffer.split(/\n\n/); buffer = messages.pop() || ""; // Keep the last incomplete chunk in the buffer for (const message of messages) { @@ -79,7 +73,7 @@ } // Extract the JSON payload - let jsonStr = message.slice(6); // Remove 'data: ' prefix + const jsonStr = message.slice(6); // Remove 'data: ' prefix if (jsonStr === "[DONE]") { if (options.debug || globalDebug) { console.log(`[callAi:${PACKAGE_VERSION}] Received [DONE] signal`); @@ -91,51 +85,37 @@ // Try to parse the JSON try { + if (options.debug || globalDebug) { + console.log(`[callAi:${PACKAGE_VERSION}] Raw chunk #${chunkCount}:`, jsonStr); + } const json = JSON.parse(jsonStr); // Check for error responses in the stream if ( json.error || json.type === "error" || - (json.choices && - json.choices.length > 0 && - json.choices[0].finish_reason === "error") + (json.choices && json.choices.length > 0 && json.choices[0].finish_reason === "error") ) { // Extract error message const errorMessage = - json.error?.message || - json.error || - json.choices?.[0]?.message?.content || - "Unknown streaming error"; + json.error?.message || json.error || json.choices?.[0]?.message?.content || "Unknown streaming error"; if (options.debug || globalDebug) { - console.error( - `[callAi:${PACKAGE_VERSION}] Detected error in streaming response:`, - json, - ); + console.error(`[callAi:${PACKAGE_VERSION}] Detected error in streaming response:`, json); } // Create a detailed error to throw - const detailedError = new Error( - `API streaming error: ${errorMessage}`, - ); - - // Add error metadata - (detailedError as any).status = json.error?.status || 400; - (detailedError as any).statusText = - json.error?.type || "Bad Request"; - (detailedError as any).details = JSON.stringify(json.error || json); - - console.error( - `[callAi:${PACKAGE_VERSION}] Throwing stream error:`, - detailedError, - ); + const detailedError = new CallAIError({ + message: `API streaming error: ${errorMessage}`, + status: json.error?.status || 400, + statusText: json.error?.type || "Bad Request", + details: JSON.stringify(json.error || json), + contentType: "application/json", + }); + console.error(`[callAi:${PACKAGE_VERSION}] Throwing stream error:`, detailedError); throw detailedError; } // Handle tool use response - Claude with schema cases - const isClaudeWithSchema = - /claude/i.test(model) && schemaStrategy.strategy === "tool_mode"; + const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode"; if (isClaudeWithSchema) { // Claude streaming tool calls - need to assemble arguments @@ -145,10 +125,7 @@ // Handle finish reason tool_calls - this is where we know the tool call is complete if (choice.finish_reason === "tool_calls") { if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Received tool_calls finish reason. Assembled JSON:`, - toolCallsAssembled, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Received tool_calls finish reason. 
Assembled JSON:`, toolCallsAssembled); } // Full JSON collected, construct a proper object with it @@ -159,7 +136,8 @@ async function* createStreamingGenerator( try { // First try parsing as-is JSON.parse(toolCallsAssembled); - } catch (parseError) { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + } catch (e) { if (options.debug) { console.log( `[callAi:${PACKAGE_VERSION}] Attempting to fix malformed JSON in tool call:`, @@ -171,7 +149,7 @@ async function* createStreamingGenerator( let fixedJson = toolCallsAssembled; // 1. Remove trailing commas - fixedJson = fixedJson.replace(/,\s*([\}\]])/, "$1"); + fixedJson = fixedJson.replace(/,\s*([}\]])/, "$1"); // 2. Ensure proper JSON structure // Add closing braces if missing @@ -193,28 +171,17 @@ async function* createStreamingGenerator( // 3. Fix various property name/value split issues // Fix dangling property names without values - fixedJson = fixedJson.replace( - /"(\w+)"\s*:\s*$/g, - '"$1":null', - ); + fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*$/g, '"$1":null'); // Fix missing property values - fixedJson = fixedJson.replace( - /"(\w+)"\s*:\s*,/g, - '"$1":null,', - ); + fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*,/g, '"$1":null,'); // Fix incomplete property names (when split across chunks) - fixedJson = fixedJson.replace( - /"(\w+)"\s*:\s*"(\w+)$/g, - '"$1$2"', - ); + fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*"(\w+)$/g, '"$1$2"'); // Balance brackets - const openBrackets = (fixedJson.match(/\[/g) || []) - .length; - const closeBrackets = (fixedJson.match(/\]/g) || []) - .length; + const openBrackets = (fixedJson.match(/\[/g) || []).length; + const closeBrackets = (fixedJson.match(/\]/g) || []).length; if (openBrackets > closeBrackets) { fixedJson += "]".repeat(openBrackets - closeBrackets); } @@ -236,10 +203,7 @@ async function* createStreamingGenerator( yield completeText; continue; } catch (e) { - console.error( - "[callAIStreaming] Error handling assembled tool call:", - e, - ); + console.error("[callAIStreaming] Error handling assembled tool call:", e); } } @@ -247,17 +211,10 @@ async function* createStreamingGenerator( // Simply accumulate the raw strings without trying to parse them if (choice && choice.delta && choice.delta.tool_calls) { const toolCall = choice.delta.tool_calls[0]; - if ( - toolCall && - toolCall.function && - toolCall.function.arguments !== undefined - ) { + if (toolCall && toolCall.function && toolCall.function.arguments !== undefined) { toolCallsAssembled += toolCall.function.arguments; if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Accumulated tool call chunk:`, - toolCall.function.arguments, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Accumulated tool call chunk:`, toolCall.function.arguments); } } } @@ -265,10 +222,7 @@ async function* createStreamingGenerator( } // Handle tool use response - old format - if ( - isClaudeWithSchema && - (json.stop_reason === "tool_use" || json.type === "tool_use") - ) { + if (isClaudeWithSchema && (json.stop_reason === "tool_use" || json.type === "tool_use")) { // First try direct tool use object format if (json.type === "tool_use") { completeText = schemaStrategy.processResponse(json); @@ -278,9 +232,7 @@ async function* createStreamingGenerator( // Extract the tool use content if (json.content && Array.isArray(json.content)) { - const toolUseBlock = json.content.find( - (block: any) => block.type === "tool_use", - ); + const toolUseBlock = json.content.find((block: ToolUseType) => block.type === "tool_use"); if (toolUseBlock) 
{ completeText = schemaStrategy.processResponse(toolUseBlock); yield completeText; @@ -292,9 +244,7 @@ async function* createStreamingGenerator( if (json.choices && Array.isArray(json.choices)) { const choice = json.choices[0]; if (choice.message && Array.isArray(choice.message.content)) { - const toolUseBlock = choice.message.content.find( - (block: any) => block.type === "tool_use", - ); + const toolUseBlock = choice.message.content.find((block: ToolUseType) => block.type === "tool_use"); if (toolUseBlock) { completeText = schemaStrategy.processResponse(toolUseBlock); yield completeText; @@ -304,9 +254,7 @@ async function* createStreamingGenerator( // Handle case where the tool use is in the delta if (choice.delta && Array.isArray(choice.delta.content)) { - const toolUseBlock = choice.delta.content.find( - (block: any) => block.type === "tool_use", - ); + const toolUseBlock = choice.delta.content.find((block: ToolUseType) => block.type === "tool_use"); if (toolUseBlock) { completeText = schemaStrategy.processResponse(toolUseBlock); yield completeText; @@ -331,10 +279,7 @@ async function* createStreamingGenerator( yield schemaStrategy.processResponse(completeText); } // Handle content blocks for Claude/Anthropic response format - else if ( - json.choices?.[0]?.message?.content && - Array.isArray(json.choices[0].message.content) - ) { + else if (json.choices?.[0]?.message?.content && Array.isArray(json.choices[0].message.content)) { const contentBlocks = json.choices[0].message.content; // Find text or tool_use blocks for (const block of contentBlocks) { @@ -350,17 +295,9 @@ async function* createStreamingGenerator( } // Find text delta for content blocks (Claude format) - if ( - json.type === "content_block_delta" && - json.delta && - json.delta.type === "text_delta" && - json.delta.text - ) { + if (json.type === "content_block_delta" && json.delta && json.delta.type === "text_delta" && json.delta.text) { if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Received text delta:`, - json.delta.text, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Received text delta:`, json.delta.text); } completeText += json.delta.text; // In some models like Claude, don't yield partial results as they can be malformed JSON @@ -391,18 +328,13 @@ async function* createStreamingGenerator( JSON.parse(result); } catch (e) { if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Final JSON validation failed:`, - e, - `\nAttempting to fix JSON:`, - result, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Final JSON validation failed:`, e, `\nAttempting to fix JSON:`, result); } // Apply more robust fixes for Claude's streaming JSON issues // 1. Remove trailing commas (common in malformed JSON) - result = result.replace(/,\s*([\}\]])/, "$1"); + result = result.replace(/,\s*([}\]])/, "$1"); // 2. 
Ensure we have proper JSON structure // Add closing braces if missing @@ -436,10 +368,7 @@ } if (options.debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Applied final JSON fixes:`, - result, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Applied final JSON fixes:`, result); } } @@ -451,10 +380,7 @@ JSON.parse(completeText); } catch (finalParseError) { if (options.debug) { - console.error( - `[callAi:${PACKAGE_VERSION}] Final JSON validation still failed:`, - finalParseError, - ); + console.error(`[callAi:${PACKAGE_VERSION}] Final JSON validation still failed:`, finalParseError); } } @@ -468,7 +394,7 @@ // Add the rawResponse field to match non-streaming behavior // For streaming, we use the final complete text as the raw response - (meta as any).rawResponse = completeText; + meta.rawResponse = completeText; // Store metadata for this response const boxed = boxString(completeText); @@ -492,14 +418,12 @@ // This is a higher-level function that prepares the request // and handles model fallback async function* callAIStreaming( - prompt: string | any[], + prompt: string | Message[], options: CallAIOptions = {}, - isRetry: boolean = false, + isRetry = false, ): AsyncGenerator { // Convert simple string prompts to message array format - const messages = Array.isArray(prompt) - ? prompt - : [{ role: "user", content: prompt }]; + const messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt } satisfies Message]; // API key should be provided by options (validation happens in callAi) const apiKey = options.apiKey; @@ -513,29 +437,27 @@ // Choose a schema strategy based on model const schemaStrategy = options.schemaStrategy; + if (!schemaStrategy) { + throw new Error("Schema strategy is required for streaming"); + } // Default to JSON response for certain models - const responseFormat = - options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model) - ? "json" - : undefined; + const responseFormat = options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model) ? "json" : undefined; const debug = options.debug === undefined ? globalDebug : options.debug; if (debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Making streaming request to: ${url}`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Making streaming request to: ${url}`); console.log(`[callAi:${PACKAGE_VERSION}] With model: ${model}`); } // Build request body - const requestBody: any = { + const requestBody: SchemaAIMessageRequest = { model, messages, max_tokens: options.maxTokens || 2048, temperature: options.temperature !== undefined ? options.temperature : 0.7, - top_p: options.topP !== undefined ? options.topP : 1, + top_p: options.topP !== undefined ? 
options.topP : 1, stream: true, }; @@ -546,10 +468,7 @@ async function* callAIStreaming( // Add schema-specific parameters (if schema is provided) if (options.schema) { - Object.assign( - requestBody, - schemaStrategy.prepareRequest(options.schema, messages), - ); + Object.assign(requestBody, schemaStrategy?.prepareRequest(options.schema, messages)); } // Add HTTP referer and other options to help with abuse prevention @@ -585,7 +504,7 @@ async function* callAIStreaming( "debug", ].includes(key) ) { - requestBody[key] = (options as any)[key]; + requestBody[key] = options[key]; } }); @@ -606,17 +525,11 @@ async function* callAIStreaming( // Handle HTTP errors if (!response.ok) { // Check if this is an invalid model error that we can handle with a fallback - const { isInvalidModel, errorData } = await checkForInvalidModelError( - response, - model, - debug, - ); + const { isInvalidModel, errorData } = await checkForInvalidModelError(response, model, debug); if (isInvalidModel && !isRetry && !options.skipRetry) { if (debug) { - console.log( - `[callAi:${PACKAGE_VERSION}] Invalid model "${model}", falling back to "${FALLBACK_MODEL}"`, - ); + console.log(`[callAi:${PACKAGE_VERSION}] Invalid model "${model}", falling back to "${FALLBACK_MODEL}"`); } // Retry with the fallback model using yield* to delegate to the other generator @@ -634,9 +547,7 @@ async function* callAIStreaming( } // For other errors, throw with details - const errorText = errorData - ? JSON.stringify(errorData) - : `HTTP error! Status: ${response.status}`; + const errorText = errorData ? JSON.stringify(errorData) : `HTTP error! Status: ${response.status}`; throw new Error(errorText); } @@ -649,10 +560,7 @@ async function* callAIStreaming( // Network errors must be directly re-thrown without modification // This is exactly how the original implementation handles it if (debug) { - console.error( - `[callAi:${PACKAGE_VERSION}] Network error during fetch:`, - fetchError, - ); + console.error(`[callAi:${PACKAGE_VERSION}] Network error during fetch:`, fetchError); } // Critical: throw the exact same error object without any wrapping throw fetchError; diff --git a/call-ai/types.ts b/call-ai/types.ts new file mode 100644 index 0000000..f0ab352 --- /dev/null +++ b/call-ai/types.ts @@ -0,0 +1,438 @@ +/** + * Type definitions for call-ai + */ + +export type Falsy = false | null | undefined | 0 | ""; + +export interface OriginalError { + readonly originalError: Error; + readonly refreshError: Error; + readonly status: number; +} + +/** + * Content types for multimodal messages + */ +export interface ContentItem { + readonly type: "text" | "image_url"; + readonly text?: string; + readonly image_url?: { + readonly url: string; + }; +} + +/** + * Message type supporting both simple string content and multimodal content + */ +export interface Message { + readonly role: "user" | "system" | "assistant"; + readonly content: string | ContentItem[]; +} + +/** + * Metadata associated with a response + * Available through the getMeta() helper function + */ +export interface ResponseMeta { + /** + * The model used for the response + */ + model: string; + + /** + * The endpoint used for the response + */ + endpoint?: string; + + /** + * Timing information about the request + */ + timing: { + readonly startTime: number; + endTime?: number; + duration?: number; + }; + + /** + * Raw response data from the fetch call + * Contains the parsed JSON result from the API call + */ + rawResponse?: ModelId | string; +} + +export interface ModelId { + readonly 
model: string; + readonly id: string; +} + +export interface Schema { + /** + * Optional schema name - will be sent to OpenRouter if provided + * If not specified, defaults to "result" + */ + readonly name?: string; + /** + * Properties defining the structure of your schema + */ + readonly properties: Record<string, unknown>; + /** + * Fields that are required in the response (defaults to all properties) + */ + readonly required?: string[]; + /** + * Whether to allow fields not defined in properties (defaults to false) + */ + readonly additionalProperties?: boolean; + /** + * Any additional schema properties to pass through + */ + readonly [key: string]: unknown; +} + +export interface ToolUseType { + readonly type: "tool_use"; + readonly input: string; + readonly tool_calls: OpenAIFunctionCall[]; +} +export function isToolUseType(obj: unknown): obj is ToolUseType { + return !!obj && (obj as ToolUseType).type === "tool_use"; +} + +export interface ToolUseResponse { + readonly tool_use: { + readonly input: string; + }; +} +export function isToolUseResponse(obj: unknown): obj is ToolUseResponse { + return !!obj && (obj as ToolUseResponse).tool_use !== undefined; +} + +export interface AIResult { + choices: { + message: { + content?: string; + function_call: string | ToolUseType | ToolUseResponse; + tool_calls?: string; + }; + text?: string; + }[]; +} + +export interface OpenAIFunctionCall { + readonly type: "function"; + readonly function: { + readonly arguments?: string; + readonly name?: string; + readonly description?: string; + readonly parameters?: RequestSchema | ProcessedSchema; + }; +} + +export function isOpenAIArray(obj: unknown): obj is OpenAIFunctionCall[] { + return Array.isArray(obj) && obj.length > 0 && obj[0].function !== undefined; +} + +export interface RequestSchema { + model?: string; + name?: string; + type: "object"; + description?: string; + properties?: unknown; + required?: unknown[]; + parameters?: RequestSchema; + additionalProperties?: unknown; +} + +export interface SchemaAIMessageRequest { + model: string; + messages: Message[]; + max_tokens: number; + temperature: number; + top_p: number; + stream: boolean; + response_format?: SchemaAIJsonSchemaRequest["response_format"] | SchemaAIJsonObjectRequest["response_format"]; + [key: string]: unknown; +} + +export interface ProcessedSchema { + properties: Record<string, unknown>; + items?: ProcessedSchema; + [key: string]: unknown; +} + +export interface SchemaType { + readonly type: string; +} + +export interface SchemaDescription { + readonly description: string; +} + +export interface SchemaAIJsonObjectRequest { + response_format: { + type: "json_object"; + }; +} + +export interface SchemaAIJsonSchemaRequest { + response_format: { + type: "json_schema"; + json_schema: { + name: string; + strict?: boolean; + schema: ProcessedSchema; + }; + }; +} + +interface SchemaAIToolRequest { + tools: OpenAIFunctionCall[]; + tool_choice: OpenAIFunctionCall; +} + +interface SchemaAISimpleMsg { + readonly messages: Message[]; +} + +/** + * Strategy interface for handling different model types + */ +export interface ModelStrategy { + readonly name: string; + readonly prepareRequest: ( + schema: Schema | Falsy, + messages: Message[], + ) => SchemaAISimpleMsg | SchemaAIMessageRequest | SchemaAIToolRequest | SchemaAIJsonSchemaRequest | SchemaAIJsonObjectRequest; + // | undefined; + readonly processResponse: (content: string | ToolUseType | ToolUseResponse | OpenAIFunctionCall[]) => string; + readonly shouldForceStream?: boolean; +} +
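+// Illustrative sketch (hypothetical, not part of the library's exports): a minimal
+// object satisfying ModelStrategy, shaped after defaultStrategy in
+// strategies/model-strategies.ts:
+//
+//   const passthroughStrategy: ModelStrategy = {
+//     name: "passthrough",
+//     prepareRequest: (_schema, messages) => ({ messages }),
+//     processResponse: (content) => (typeof content === "string" ? content : JSON.stringify(content)),
+//   };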
+export interface CallAIErrorParams { + readonly message: string; + readonly status: number; + readonly statusText?: string; + readonly details?: unknown; + readonly contentType?: string; + readonly statusCode?: number; + readonly response?: { + readonly status: number; + }; + readonly partialContent?: string; + readonly name?: string; + readonly cause?: unknown; + readonly originalError?: CallAIErrorParams | Error; + readonly refreshError?: unknown; + readonly errorType?: string; +} +export class CallAIError { + readonly message: string; + readonly status: number; + readonly statusText?: string; + readonly details?: unknown; + readonly contentType?: string; + readonly originalError?: CallAIErrorParams | Error; + readonly refreshError?: unknown; + readonly errorType?: string; + readonly partialContent?: string; + + constructor(params: CallAIErrorParams) { + this.message = params.message; + this.status = params.status; + this.statusText = params.statusText; + this.details = params.details; + this.contentType = params.contentType; + this.originalError = params.originalError; + this.partialContent = params.partialContent; + this.refreshError = params.refreshError; + this.errorType = params.errorType; + } +} + +/** + * Schema strategies for different model types + */ +export type SchemaStrategyType = "json_schema" | "tool_mode" | "system_message" | "none"; + +/** + * Strategy selection result + */ +export interface SchemaStrategy { + readonly strategy: SchemaStrategyType; + readonly model: string; + readonly prepareRequest: ModelStrategy["prepareRequest"]; + readonly processResponse: ModelStrategy["processResponse"]; + readonly shouldForceStream: boolean; +} + +/** + * Return type for streaming API calls + */ +export type StreamResponse = AsyncGenerator<string>; + +/** + * @internal + * Internal type for backward compatibility with v0.6.x + * This type is not exposed in public API documentation + */ +export type ThenableStreamResponse = AsyncGenerator<string> & Promise<string>; + +export interface CallAIOptions { + /** + * API key for authentication + */ + readonly apiKey?: string; + + /** + * Model ID to use for the request + */ + readonly model?: string; + + /** + * API endpoint to send the request to + */ + readonly endpoint?: string; + + /** + * Custom origin for chat API + * Can also be set via window.CALLAI_CHAT_URL or callAiEnv.CALLAI_CHAT_URL + */ + readonly chatUrl?: string; + + /** + * Whether to stream the response + */ + stream?: boolean; + + /** + * Authentication token for key refresh service + * Can also be set via window.CALL_AI_REFRESH_TOKEN, callAiEnv.CALL_AI_REFRESH_TOKEN, or default to "use-vibes" + */ + refreshToken?: string; + + /** + * Callback function to update refresh token when current token fails + * Gets called with the current failing token and should return a new token + * @param currentToken The current refresh token that failed + * @returns A Promise that resolves to a new refresh token + */ + readonly updateRefreshToken?: (currentToken: string) => Promise<string>; + + /** + * Schema for structured output + */ + readonly schema?: Schema | null; + + /** + * Modalities to enable in the response (e.g., ["image", "text"]) + * Used for multimodal models that can generate images + */ + readonly modalities?: string[]; + + /** + * Whether to skip retry with fallback model when model errors occur + * Useful in testing and cases where retries should be suppressed + */ + readonly skipRetry?: boolean; + + /** + * Skip key refresh on 4xx errors + * Useful for testing error conditions or when you want to handle refresh manually + */ + 
readonly skipRefresh?: boolean; + + /** + * Enable raw response logging without any filtering or processing + */ + readonly debug?: boolean; + + readonly referer?: string; + readonly title?: string; + + readonly schemaStrategy?: SchemaStrategy; + + readonly maxTokens?: number; + temperature?: number; + readonly topP?: number; + response_format?: { type: "json_object" }; + + /** + * Any additional options to pass to the API + */ + [key: string]: unknown; +} + +export interface AIResponse { + readonly text: string; + readonly usage?: { + readonly promptTokens: number; + readonly completionTokens: number; + readonly totalTokens: number; + }; + readonly model: string; +} + +/** + * Response from image generation API + */ +export interface ImageResponse { + readonly created: number; + readonly data: { + readonly b64_json: string; + readonly url?: string; + readonly revised_prompt?: string; + }[]; +} + +/** + * Options for image generation + */ +export interface ImageGenOptions { + /** + * API key for authentication + * Defaults to "VIBES_DIY" + */ + readonly apiKey?: string; + + /** + * Model to use for image generation + * Defaults to "gpt-image-1" + */ + readonly model?: string; + + /** + * Size of the generated image + */ + readonly size?: string; + + /** + * Quality of the generated image + */ + readonly quality?: string; + + /** + * Style of the generated image + */ + readonly style?: string; + + /** + * For image editing: array of File objects to be edited + */ + readonly images?: File[]; + + /** + * Custom base URL for the image generation API + * Can also be set via window.CALLAI_IMG_URL or callAiEnv.CALLAI_IMG_URL + */ + readonly imgUrl?: string; + + /** + * Enable debug logging + */ + readonly debug?: boolean; +} + +/** + * @deprecated Use ImageGenOptions instead + */ +export type ImageEditOptions = ImageGenOptions; diff --git a/call-ai/utils.ts b/call-ai/utils.ts new file mode 100644 index 0000000..2d699b2 --- /dev/null +++ b/call-ai/utils.ts @@ -0,0 +1,135 @@ +/** + * Utility functions for call-ai + */ + +import { ProcessedSchema } from "./types.js"; + +/** + * Recursively adds additionalProperties: false to all object types in a schema + * This is needed for OpenAI's strict schema validation in streaming mode + */ +export function recursivelyAddAdditionalProperties(schema: ProcessedSchema): ProcessedSchema { + // Clone to avoid modifying the original + const result = { ...schema }; + + // If this is an object type, ensure it has additionalProperties: false + if (result.type === "object") { + // Set additionalProperties if not already set + if (result.additionalProperties === undefined) { + result.additionalProperties = false; + } + + // Process nested properties if they exist + if (result.properties) { + result.properties = { ...result.properties }; + + // Set required if not already set - OpenAI requires this for all nested objects + if (result.required === undefined) { + result.required = Object.keys(result.properties); + } + + // Check each property + Object.keys(result.properties).forEach((key) => { + const prop = result.properties[key]; + + // If property is an object or array type, recursively process it + if (prop && typeof prop === "object") { + const oprop = prop as ProcessedSchema; + result.properties[key] = recursivelyAddAdditionalProperties(oprop); + + // For nested objects, ensure they also have all properties in their required field + if (oprop.type === "object" && oprop.properties) { + oprop.required = Object.keys(oprop.properties); + } + } + }); + } + } + + // 
Handle nested objects in arrays + if (result.type === "array" && result.items && typeof result.items === "object") { + result.items = recursivelyAddAdditionalProperties(result.items); + + // If array items are objects, ensure they have all properties in required + if (result.items.type === "object" && result.items.properties) { + result.items.required = Object.keys(result.items.properties); + } + } + + return result; +} + +class CallAIEnv { + private getEnv(key: string): string | undefined { + const wEnv = this.getEnvFromWindow(key); + if (wEnv) return wEnv; + + if (typeof process !== "undefined" && process.env) { + return process.env[key]; + } + console.warn("[callAi] Environment variable not found:", key); + return undefined; + } + + private getWindow(): (Window & { callAi?: { API_KEY: string } }) | undefined { + return globalThis.window ? globalThis.window : undefined; + } + + private getEnvFromWindow(key: string): string | undefined { + const window = this.getWindow(); + if (window && key in window) { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return (window as any)[key]; + } + return undefined; + } + + readonly def = { + get CALLAI_REFRESH_ENDPOINT() { + // ugly as hell but useful + return callAiEnv.CALLAI_REFRESH_ENDPOINT ?? "https://vibecode.garden"; + }, + }; + + get CALLAI_IMG_URL() { + return this.getEnv("CALLAI_IMG_URL"); + } + + get CALLAI_CHAT_URL() { + return this.getEnv("CALLAI_CHAT_URL"); + } + + get CALLAI_API_KEY() { + return ( + this.getEnv("CALLAI_API_KEY") ?? + this.getEnv("OPENROUTER_API_KEY") ?? + this.getWindow()?.callAi?.API_KEY ?? + this.getEnv("LOW_BALANCE_OPENROUTER_API_KEY") + ); + } + get CALLAI_REFRESH_ENDPOINT() { + return this.getEnv("CALLAI_REFRESH_ENDPOINT"); + } + get CALL_AI_REFRESH_TOKEN() { + return this.getEnv("CALL_AI_REFRESH_TOKEN"); + } + + get CALLAI_REKEY_ENDPOINT() { + return this.getEnv("CALLAI_REKEY_ENDPOINT"); + } + get CALL_AI_KEY_TOKEN() { + return this.getEnv("CALL_AI_KEY_TOKEN"); + } + get CALLAI_REFRESH_TOKEN() { + return this.getEnv("CALLAI_REFRESH_TOKEN"); + } + get CALLAI_DEBUG() { + return !!this.getEnv("CALLAI_DEBUG"); + } + + get NODE_ENV() { + return this.getEnv("NODE_ENV"); + } +} + +export const callAiEnv = new CallAIEnv(); diff --git a/call-ai/version.ts b/call-ai/version.ts new file mode 100644 index 0000000..eb0f486 --- /dev/null +++ b/call-ai/version.ts @@ -0,0 +1 @@ +export const PACKAGE_VERSION = "0.0.1"; diff --git a/eslint.config.mjs b/eslint.config.mjs new file mode 100644 index 0000000..fd8e100 --- /dev/null +++ b/eslint.config.mjs @@ -0,0 +1,50 @@ +import eslint from "@eslint/js"; +import tseslint from "typescript-eslint"; +import importPlugin from "eslint-plugin-import"; + +const opts = tseslint.config( + eslint.configs.recommended, + // ...tseslint.configs.recommended, + ...tseslint.configs.strict, + ...tseslint.configs.stylistic, + { + languageOptions: { + globals: { + queueMicrotask: "readonly", + }, + }, + }, + { + ignores: [ + "babel.config.cjs", + "jest.config.js", + "**/dist/", + "**/pubdir/", + "**/node_modules/", + "**/scripts/", + "**/examples/", + "scripts/", + "smoke/react/", + "src/missingTypes/lib.deno.d.ts", + "**/.cache/**", + "**/.esm-cache/**", + "**/.wrangler/**", + ], + }, + { + plugins: { + import: importPlugin, + }, + rules: { + // "no-console": ["warn"], + "import/no-duplicates": ["error"], + }, + }, + { + rules: { + // "no-restricted-globals": ["error", "URL", "TextDecoder", "TextEncoder"], + }, + }, +); + +export default opts; diff --git a/jest.config.js b/jest.config.js index 
eb5d197..fd4e8a3 100644 --- a/jest.config.js +++ b/jest.config.js @@ -1,16 +1,12 @@ module.exports = { - preset: 'ts-jest', - testEnvironment: 'node', - testMatch: ['**/src/**/*.test.ts', '**/test/**/*.test.ts'], + preset: "ts-jest", + testEnvironment: "node", + testMatch: ["**/src/**/*.test.ts", "**/test/**/*.test.ts"], // We ignore integration tests by default for normal test runs // but the test:integration script explicitly specifies this file, // which will override this pattern - testPathIgnorePatterns: ['/node_modules/', '/test/integration.test.ts'], - collectCoverageFrom: [ - 'src/**/*.ts', - '!src/**/*.d.ts', - '!src/**/*.test.ts', - ], + testPathIgnorePatterns: ["/node_modules/", "/test/integration.test.ts"], + collectCoverageFrom: ["src/**/*.ts", "!src/**/*.d.ts", "!src/**/*.test.ts"], coverageThreshold: { global: { branches: 55, @@ -19,4 +15,4 @@ module.exports = { statements: 65, }, }, -}; \ No newline at end of file +}; diff --git a/model-chooser-new.md b/model-chooser-new.md index e326dab..2b919c6 100644 --- a/model-chooser-new.md +++ b/model-chooser-new.md @@ -28,40 +28,39 @@ Based on our testing, here's the strategy for reliable structured JSON output ac function chooseSchemaStrategy(model, schema) { // Extract model family from the full model name const modelFamily = getModelFamily(model); - - if (modelFamily === 'openai') { + + if (modelFamily === "openai") { return { - strategy: 'json_schema', + strategy: "json_schema", schema: formatOpenAISchema(schema), - response_format: { type: 'json_object' } + response_format: { type: "json_object" }, }; } - - if (modelFamily === 'gemini') { + + if (modelFamily === "gemini") { return { - strategy: 'json_schema', - schema: formatOpenAISchema(schema) // Same format works for Gemini + strategy: "json_schema", + schema: formatOpenAISchema(schema), // Same format works for Gemini }; } - - if (modelFamily === 'anthropic') { + + if (modelFamily === "anthropic") { return { - strategy: 'tool_mode', + strategy: "tool_mode", tools: [formatClaudeToolSchema(schema)], - stream: true // Force streaming for Claude + stream: true, // Force streaming for Claude }; } - + // Default fallback for all other models return { - strategy: 'system_message', - systemPrompt: formatSchemaAsSystemPrompt(schema) + strategy: "system_message", + systemPrompt: formatSchemaAsSystemPrompt(schema), }; } // end Pseudocode ``` - ## Benefits of This Approach - **Maximum Compatibility**: Works across all major AI providers @@ -83,6 +82,7 @@ The current implementation in `index.ts` uses multiple conditional checks and fl ### Simplifying Model Detection Current approach: + ```typescript // Multiple individual model detection flags const isClaudeModel = options.model ? /claude/i.test(options.model) : false; @@ -100,6 +100,7 @@ const useJsonSchemaApproach = (isOpenAIModel || isGeminiModel) && options.schema ``` Refactored approach: + ```typescript const schemaStrategy = chooseSchemaStrategy(options.model, options.schema); // Use schemaStrategy.strategy to determine the approach @@ -131,7 +132,7 @@ const schemaStrategy = chooseSchemaStrategy(options.model, options.schema); const isGeminiModel = options.model ? /gemini/i.test(options.model) : false; const isLlama3Model = options.model ? /llama-3/i.test(options.model) : false; const isDeepSeekModel = options.model ? /deepseek/i.test(options.model) : false; - + if (needsJsonExtraction) { ... 
} ``` @@ -145,11 +146,11 @@ const schemaStrategy = chooseSchemaStrategy(options.model, options.schema); } // Would be replaced with simpler strategy-based approach that HIDES implementation details - if (schemaStrategy.strategy === 'tool_mode' && schemaStrategy.stream && !options.stream) { + if (schemaStrategy.strategy === "tool_mode" && schemaStrategy.stream && !options.stream) { // Internally use streaming but buffer results for the caller const originalStream = options.stream; options.stream = true; // Force streaming internally - + // If caller requested non-streaming, we need to buffer and return complete result if (!originalStream) { return bufferStreamingToSingleResponse(callAIInternal(prompt, options)); @@ -167,7 +168,7 @@ const schemaStrategy = chooseSchemaStrategy(options.model, options.schema); ```typescript // Logic like this appears in both callAINonStreaming and callAIStreaming - if (useToolMode && result.stop_reason === 'tool_use') { + if (useToolMode && result.stop_reason === "tool_use") { // Extract the tool use content... } ``` @@ -188,9 +189,10 @@ This approach would make the code more modular, easier to test, and simpler to e The fundamental purpose of `callAi` is to provide a simple, consistent interface that shields users from the underlying complexity of different AI models and their unique implementation requirements. Key principles: + - **Simple API, Complex Implementation**: Users should have a straightforward experience while the library handles the intricate details. - **Respecting User Options**: When a user specifies options like `stream: false`, the API contract should be honored even if implementation details (like using streaming internally) differ. - **Consistent Results**: The same JSON schema should produce well-structured results across all supported models without requiring model-specific code from users. - **Intelligent Defaults**: The library should automatically select the best approach for each model while allowing overrides when needed. -By refactoring to use the strategy pattern and properly abstracting the model-specific details, `callAi` can maintain its promise of simplicity while supporting an increasingly diverse ecosystem of AI models and their unique capabilities. \ No newline at end of file +By refactoring to use the strategy pattern and properly abstracting the model-specific details, `callAi` can maintain its promise of simplicity while supporting an increasingly diverse ecosystem of AI models and their unique capabilities. 
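The `bufferStreamingToSingleResponse` helper referenced above is never defined in these notes. A minimal sketch, assuming the internal streaming generator yields incremental text deltas (the real generator contract may differ, e.g. it could yield accumulated text):

```typescript
// Sketch only: collapse an internal streaming generator into one result so a
// caller who passed `stream: false` still receives a plain Promise<string>.
async function bufferStreamingToSingleResponse(
  stream: AsyncGenerator<string>,
): Promise<string> {
  let buffered = "";
  for await (const delta of stream) {
    buffered += delta; // assumes each yielded value is a new text fragment
  }
  return buffered;
}
```

This keeps the forced-streaming workaround for Claude tool mode invisible to callers, which is the API-contract point made above.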
diff --git a/notes/changes.md b/notes/changes.md index 6336a83..adbe304 100644 --- a/notes/changes.md +++ b/notes/changes.md @@ -1,6 +1,7 @@ # Changes Summary: Current HEAD vs dc3f088 ## Files Changed + - `.env.example` (New file): Added template for API keys - `README.md`: Added integration test documentation - `jest.config.js`: Updated test matching patterns @@ -13,27 +14,32 @@ ## Major Changes ### Enhanced Model Support + - Added explicit support for Claude models with special handling for structured output - Improved model selection logic with better defaults based on schema requirements - Added detection for Claude models to adjust output formatting ### Integration Tests + - Added comprehensive integration test suite that makes real API calls - Created separate test scripts to avoid running API calls during CI/CD - Added tests for different model providers (OpenAI, Claude, Gemini) - Added structured output validation for various schema formats ### Environment Configuration + - Added `.env.example` with template for API keys - Added dotenv dependency for loading environment variables - Support for both OPENROUTER_API_KEY and CALLAI_API_KEY ### Code Organization + - Moved tests to dedicated test directory - Separated unit tests from integration tests - Added test patterns to exclude integration tests from regular test runs ### API Updates + - Version bump to 0.4.1 - Updated license information -- Enhanced schema handling for better compatibility with different providers \ No newline at end of file +- Enhanced schema handling for better compatibility with different providers diff --git a/notes/claude-tool-use.md b/notes/claude-tool-use.md index f29e243..582db67 100644 --- a/notes/claude-tool-use.md +++ b/notes/claude-tool-use.md @@ -48,10 +48,7 @@ const isClaudeModel = options.model ? /claude/i.test(options.model) : false; const useToolMode = isClaudeModel && options.schema; // Models that should use system message approach for structured output -const useSystemMessageApproach = - isLlama3Model || - isDeepSeekModel || - isGPT4TurboModel; +const useSystemMessageApproach = isLlama3Model || isDeepSeekModel || isGPT4TurboModel; ``` With this change, Claude models automatically use tool mode when a schema is provided, OpenAI models use the JSON Schema approach, and other models use the system message approach. @@ -61,39 +58,42 @@ With this change, Claude models automatically use tool mode when a schema is pro The current model switching could be simplified in several ways: 1. **Simplify Model Detection**: Replace multiple boolean flags with a single model category: + ```typescript // Current approach with multiple flags const isClaudeModel = options.model ? /claude/i.test(options.model) : false; const isGeminiModel = options.model ? /gemini/i.test(options.model) : false; const isLlama3Model = options.model ? /llama-3/i.test(options.model) : false; // etc. - + // Simplified approach const modelCategory = getModelCategory(options.model); // Returns: 'claude', 'openai', 'gemini', 'llama', 'other', etc. ``` 2. 
**Schema Strategy Map**: Define schema strategies by model category: + ```typescript const schemaStrategies = { - claude: 'tool', // Uses tool mode - openai: 'json_schema', // Uses response_format.json_schema - gemini: 'system_message', // Uses system message approach - llama: 'system_message', - default: 'system_message' + claude: "tool", // Uses tool mode + openai: "json_schema", // Uses response_format.json_schema + gemini: "system_message", // Uses system message approach + llama: "system_message", + default: "system_message", }; - + const strategy = schemaStrategies[modelCategory] || schemaStrategies.default; ``` 3. **Strategy Pattern**: Implement a strategy pattern for different schema approaches: + ```typescript const schemaHandlers = { tool: applyToolMode, json_schema: applyJsonSchema, - system_message: applySystemMessage + system_message: applySystemMessage, }; - + // Then use the appropriate handler const handler = schemaHandlers[strategy]; handler(requestParams, schema, messages); @@ -103,7 +103,7 @@ This approach would make the codebase more maintainable and easier to extend wit ## Conclusion -Tool mode is now the default schema enforcement approach for Claude models in the call-ai library. This provides a more "native" way for Claude to generate structured outputs, though there may be considerations around API provider compatibility and performance. +Tool mode is now the default schema enforcement approach for Claude models in the call-ai library. This provides a more "native" way for Claude to generate structured outputs, though there may be considerations around API provider compatibility and performance. The model detection and routing logic could be simplified with a more structured approach to categorizing models and their schema strategies, potentially making the codebase more maintainable. diff --git a/notes/human.md b/notes/human.md index 756e2f5..0a6c979 100644 --- a/notes/human.md +++ b/notes/human.md @@ -2,4 +2,4 @@ See [testing.md](testing.md) for details on the test suite. -See [refactor.md](refactor.md) for details on the refactoring. \ No newline at end of file +See [refactor.md](refactor.md) for details on the refactoring. diff --git a/notes/key-refresh.md b/notes/key-refresh.md index 6a0e353..d06f63a 100644 --- a/notes/key-refresh.md +++ b/notes/key-refresh.md @@ -10,12 +10,15 @@ This document outlines the API endpoints and patterns used for managing API keys ## 1. Key Creation API ### Endpoint + ``` POST ${API_ORIGIN}/api/keys ``` + Where `API_ORIGIN` defaults to `https://vibecode.garden` or uses the local path `/api/keys` for same-origin requests. 
### Request Headers + ```javascript { 'Content-Type': 'application/json', @@ -24,6 +27,7 @@ Where `API_ORIGIN` defaults to `https://vibecode.garden` or uses the local path ``` ### Request Body + ```javascript { userId: string | undefined, // Optional user ID to associate with the key @@ -33,6 +37,7 @@ Where `API_ORIGIN` defaults to `https://vibecode.garden` or uses the local path ``` ### Response Format + ```typescript { key: string, // The actual API key to use for OpenRouter @@ -48,13 +53,15 @@ Where `API_ORIGIN` defaults to `https://vibecode.garden` or uses the local path ``` ### Error Response + ```javascript { - error: string // Error message describing what went wrong + error: string; // Error message describing what went wrong } ``` ### Implementation Notes + - The key is stored in localStorage with a timestamp to track its age - Keys are considered valid for 7 days - The system uses a module-level Promise to deduplicate simultaneous key creation requests @@ -63,11 +70,13 @@ Where `API_ORIGIN` defaults to `https://vibecode.garden` or uses the local path ## 2. Credits Check API ### Endpoint + ``` GET https://openrouter.ai/api/v1/auth/key ``` ### Request Headers + ```javascript { 'Authorization': `Bearer ${apiKey}`, // The API key to check @@ -76,7 +85,9 @@ GET https://openrouter.ai/api/v1/auth/key ``` ### Response Format + The raw response has varying structures, but is normalized to: + ```typescript { available: number, // Available credits (limit - usage) @@ -86,10 +97,12 @@ The raw response has varying structures, but is normalized to: ``` ### Error Handling + - 401 Unauthorized: The API key is invalid - 429 Too Many Requests: Rate limited, need to implement backoff ### Implementation Notes + - The system warns when credits are running low (available < 0.2) - Request deduplication is used to prevent multiple simultaneous credit checks - Detailed error information is extracted from the response when available @@ -97,18 +110,22 @@ The raw response has varying structures, but is normalized to: ## Usage Patterns ### Key Lifecycle + 1. Check localStorage for existing valid key 2. If no valid key exists, create a new one via Edge Function 3. Store the key with timestamp in localStorage 4. Use the key for API requests to OpenRouter ### Managing Rate Limits + 1. Track API request timestamps in localStorage 2. Implement backoff periods when rate limited (default: 10 seconds) 3. Clear backoff timers after successful requests ### Request Deduplication + Module-level Promise variables are used to track in-flight requests: + ```javascript let pendingKeyRequest: Promise<string> | null = null; let pendingCreditsCheck: Promise<{ available: number; usage: number; limit: number }> | null = null; @@ -119,6 +136,7 @@ This ensures that multiple components requesting keys or checking credits will s ## Refresh Token Management ### Overview + The API key refresh system uses a refresh token for authentication when requesting new API keys. When the refresh token itself becomes invalid, the system now supports dynamically obtaining a new refresh token through a callback mechanism. ### Configuration Options @@ -126,7 +144,7 @@ The API key refresh system uses a refresh token for authentication when requesti ```typescript interface CallAIOptions { // Other options...
- + /** * Authentication token for key refresh service * Can also be set via window.CALL_AI_REFRESH_TOKEN, process.env.CALL_AI_REFRESH_TOKEN, or default to "use-vibes" @@ -160,14 +178,14 @@ await callAi("Tell me about France", { refreshToken: "initial-token", updateRefreshToken: async (failedToken) => { console.log(`Token ${failedToken} failed, getting new token...`); - + // Example implementation: call an authentication service const response = await fetch("https://your-auth-service.com/refresh", { method: "POST", headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ oldToken: failedToken }) + body: JSON.stringify({ oldToken: failedToken }), }); - + const data = await response.json(); return data.newToken; }, @@ -176,9 +194,9 @@ await callAi("Tell me about France", { properties: { capital: { type: "string" }, population: { type: "number" }, - languages: { type: "array", items: { type: "string" } } - } - } + languages: { type: "array", items: { type: "string" } }, + }, + }, }); ``` diff --git a/notes/model-chooser.md b/notes/model-chooser.md index 037adf4..82227f8 100644 --- a/notes/model-chooser.md +++ b/notes/model-chooser.md @@ -38,6 +38,7 @@ const useToolMode = isClaudeModel && !!options.schema; ``` When using Claude with a schema, the library uses Claude's tool use capability: + - Converts schema to a tool definition with function type - Sets `tool_choice` to force Claude to use the tool - Schema properties are converted to a proper JSON schema format @@ -50,6 +51,7 @@ const useSystemMessageApproach = isLlama3Model || isDeepSeekModel || isGPT4Turbo ``` For models that don't fully support JSON schema: + - Used for Llama 3, DeepSeek, and GPT-4 Turbo - Injects schema into a system message as instructions - Formats schema as text with property types and descriptions @@ -62,6 +64,7 @@ const useJsonSchemaApproach = (isOpenAIModel || isGeminiModel) && options.schema ``` For models with native schema support (OpenAI/GPT and Gemini): + - Uses OpenAI's `response_format` with `json_schema` type - Processes schema for compatibility - Recursively adds `additionalProperties: false` to all nested objects @@ -73,11 +76,13 @@ For models with native schema support (OpenAI/GPT and Gemini): The library handles responses differently based on model type and whether streaming is enabled: ### Non-streaming Mode + - For Claude with tool use: extracts JSON from tool use blocks - For all models with schema: processes content based on model type and extracts JSON - Applies special handling for models that might wrap JSON in markdown code blocks ### Streaming Mode + - Assembles partial responses incrementally - Handles model-specific streaming formats - For Claude with tool use in streaming: shows warning that it may not work optimally @@ -90,13 +95,15 @@ const needsJsonExtraction = isClaudeModel || isGeminiModel || isLlama3Model || i ``` For models that might return formatted text instead of direct JSON: + - Extracts JSON from markdown code blocks -- Handles various wrapper formats (```json, ```, or raw JSON objects) +- Handles various wrapper formats (`` ```json `` fences, bare `` ``` `` fences, or raw JSON objects) - Returns the extracted JSON or falls back to the original content ## Schema Processing The library recursively processes schemas to ensure all nested objects have appropriate properties: + - Sets `additionalProperties: false` by default - Ensures all nested objects have required fields properly defined - Handles arrays of objects by processing their item schemas diff --git a/notes/model-switching-refactor.md
b/notes/model-switching-refactor.md index 31fbf8f..e4d627c 100644 --- a/notes/model-switching-refactor.md +++ b/notes/model-switching-refactor.md @@ -28,19 +28,19 @@ Replace the boolean flags with a single function that categorizes models: /** * Determine the model family from a model string */ -function getModelFamily(modelString: string | undefined): 'claude' | 'openai' | 'gemini' | 'llama' | 'deepseek' | 'unknown' { - if (!modelString) return 'unknown'; - +function getModelFamily(modelString: string | undefined): "claude" | "openai" | "gemini" | "llama" | "deepseek" | "unknown" { + if (!modelString) return "unknown"; + // Lowercase for case-insensitive comparison const model = modelString.toLowerCase(); - - if (model.includes('claude')) return 'claude'; - if (model.includes('gpt') || model.includes('openai')) return 'openai'; - if (model.includes('gemini')) return 'gemini'; - if (model.includes('llama')) return 'llama'; - if (model.includes('deepseek')) return 'deepseek'; - - return 'unknown'; + + if (model.includes("claude")) return "claude"; + if (model.includes("gpt") || model.includes("openai")) return "openai"; + if (model.includes("gemini")) return "gemini"; + if (model.includes("llama")) return "llama"; + if (model.includes("deepseek")) return "deepseek"; + + return "unknown"; } ``` @@ -52,23 +52,23 @@ Define a mapping of model families to schema strategies: /** * Schema strategy types supported by the library */ -type SchemaStrategy = 'tool' | 'json_schema' | 'system_message'; +type SchemaStrategy = "tool" | "json_schema" | "system_message"; /** * Maps model families to their preferred schema strategies */ const modelSchemaStrategies: Record = { - 'claude': 'tool', // Claude uses tool mode - 'openai': 'json_schema', // OpenAI uses response_format.json_schema - 'gemini': 'system_message', // Gemini uses system message - 'llama': 'system_message', // Llama uses system message - 'deepseek': 'system_message', // Deepseek uses system message - 'unknown': 'system_message' // Default to system message for unknown models + claude: "tool", // Claude uses tool mode + openai: "json_schema", // OpenAI uses response_format.json_schema + gemini: "system_message", // Gemini uses system message + llama: "system_message", // Llama uses system message + deepseek: "system_message", // Deepseek uses system message + unknown: "system_message", // Default to system message for unknown models }; // Get the appropriate strategy for a model const modelFamily = getModelFamily(options.model); -const schemaStrategy = options.forceJsonSchema ? 'json_schema' : modelSchemaStrategies[modelFamily]; +const schemaStrategy = options.forceJsonSchema ? "json_schema" : modelSchemaStrategies[modelFamily]; ``` ### 3. Strategy Pattern Implementation @@ -79,108 +79,91 @@ Extract the schema application logic into separate functions for each strategy: /** * Apply tool mode schema strategy to request parameters */ -function applyToolModeStrategy( - requestParams: any, - schema: Schema, - messages: Message[] -): void { +function applyToolModeStrategy(requestParams: any, schema: Schema, messages: Message[]): void { console.log(`[DEBUG] Using tool mode for ${requestParams.model}`); - + // Process schema for tool use const processedSchema = { - type: 'object', + type: "object", properties: schema.properties || {}, required: Object.keys(schema.properties || {}), - additionalProperties: schema.additionalProperties !== undefined - ? 
schema.additionalProperties - : false, + additionalProperties: schema.additionalProperties !== undefined ? schema.additionalProperties : false, }; - + // Add tools parameter - requestParams.tools = [{ - name: schema.name || 'generate_structured_data', - description: 'Generate data according to the required schema', - input_schema: processedSchema - }]; - + requestParams.tools = [ + { + name: schema.name || "generate_structured_data", + description: "Generate data according to the required schema", + input_schema: processedSchema, + }, + ]; + // Force use of the tool requestParams.tool_choice = { - type: 'tool', - name: schema.name || 'generate_structured_data' + type: "tool", + name: schema.name || "generate_structured_data", }; } /** * Apply JSON schema strategy to request parameters */ -function applyJsonSchemaStrategy( - requestParams: any, - schema: Schema, - messages: Message[], - modelFamily: string -): void { +function applyJsonSchemaStrategy(requestParams: any, schema: Schema, messages: Message[], modelFamily: string): void { console.log(`[DEBUG] Using json_schema approach for ${requestParams.model}`); - + // For Claude, ensure all fields are included in 'required' let requiredFields = schema.required || []; - if (modelFamily === 'claude') { + if (modelFamily === "claude") { requiredFields = Object.keys(schema.properties || {}); } else { requiredFields = schema.required || Object.keys(schema.properties || {}); } - + const processedSchema = recursivelyAddAdditionalProperties({ - type: 'object', + type: "object", properties: schema.properties || {}, required: requiredFields, - additionalProperties: schema.additionalProperties !== undefined - ? schema.additionalProperties - : false, + additionalProperties: schema.additionalProperties !== undefined ? schema.additionalProperties : false, ...Object.fromEntries( - Object.entries(schema).filter(([key]) => - !['name', 'properties', 'required', 'additionalProperties'].includes(key) - ) - ) + Object.entries(schema).filter(([key]) => !["name", "properties", "required", "additionalProperties"].includes(key)), + ), }); - + requestParams.response_format = { - type: 'json_schema', + type: "json_schema", json_schema: { name: schema.name || "result", strict: true, - schema: processedSchema - } + schema: processedSchema, + }, }; } /** * Apply system message strategy to request parameters */ -function applySystemMessageStrategy( - requestParams: any, - schema: Schema, - messages: Message[] -): void { +function applySystemMessageStrategy(requestParams: any, schema: Schema, messages: Message[]): void { console.log(`[DEBUG] Using system message approach for ${requestParams.model}`); - + // Only add system message if one doesn't already exist - const hasSystemMessage = messages.some(m => m.role === 'system'); - + const hasSystemMessage = messages.some((m) => m.role === "system"); + if (!hasSystemMessage) { // Build a schema description const schemaProperties = Object.entries(schema.properties || {}) .map(([key, value]) => { - const type = (value as any).type || 'string'; - const description = (value as any).description ? ` // ${(value as any).description}` : ''; + const type = (value as any).type || "string"; + const description = (value as any).description ? 
` // ${(value as any).description}` : ""; return ` "${key}": ${type}${description}`; }) - .join(',\n'); - + .join(",\n"); + const systemMessage: Message = { - role: 'system', - content: `Please return your response as JSON following this schema exactly:\n{\n${schemaProperties}\n}\nDo not include any explanation or text outside of the JSON object.` + role: "system", + content: `Please return your response as JSON following this schema exactly:\n{\n${schemaProperties}\n}\nDo not include any explanation or text outside of the JSON object.`, }; - + // Update messages and request params const updatedMessages = [systemMessage, ...messages]; requestParams.messages = updatedMessages; @@ -195,42 +178,45 @@ Putting it all together: ```typescript function prepareRequestParams( prompt: string | Message[], - options: CallAIOptions -): { apiKey: string, model: string, endpoint: string, requestOptions: RequestInit } { + options: CallAIOptions, +): { + apiKey: string; + model: string; + endpoint: string; + requestOptions: RequestInit; +} { // ... existing code ... - + // Determine model family and schema strategy const modelFamily = getModelFamily(options.model); - const schemaStrategy = options.forceJsonSchema ? 'json_schema' : modelSchemaStrategies[modelFamily]; - + const schemaStrategy = options.forceJsonSchema ? "json_schema" : modelSchemaStrategies[modelFamily]; + // Handle both string prompts and message arrays - let messages = Array.isArray(prompt) - ? prompt - : [{ role: 'user', content: prompt }]; - + let messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt }]; + // Build request parameters const requestParams: any = { model: model, stream: options.stream === true, messages: messages, }; - + // Apply schema strategy if schema is provided if (schema) { switch (schemaStrategy) { - case 'tool': + case "tool": applyToolModeStrategy(requestParams, schema, messages); break; - case 'json_schema': + case "json_schema": applyJsonSchemaStrategy(requestParams, schema, messages, modelFamily); break; - case 'system_message': + case "system_message": default: applySystemMessageStrategy(requestParams, schema, messages); break; } } - + // ... rest of existing code ... } ``` @@ -263,4 +249,4 @@ function prepareRequestParams( This refactoring would significantly improve the maintainability and extensibility of the model switching logic in the call-ai library. It provides a more structured approach to handling different models and schema strategies, making it easier to add support for new models in the future. -By implementing this refactoring, we'll reduce code complexity, improve readability, and make the library more robust against changes in model behavior or new model additions. \ No newline at end of file +By implementing this refactoring, we'll reduce code complexity, improve readability, and make the library more robust against changes in model behavior or new model additions. 
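One way to see the payoff of this refactoring: supporting a hypothetical new model family would touch only the detector and the strategy map, not the request or response plumbing. A sketch using the names proposed above (`mistral` is an invented example; its strategy choice is an assumption):

```typescript
type SchemaStrategy = "tool" | "json_schema" | "system_message";

// Hypothetical extension: register a new family in one place instead of
// adding conditionals throughout the request/response code.
const modelSchemaStrategies: Record<string, SchemaStrategy> = {
  claude: "tool",
  openai: "json_schema",
  gemini: "system_message",
  llama: "system_message",
  deepseek: "system_message",
  mistral: "system_message", // assumed: no native JSON schema support
  unknown: "system_message",
};

// getModelFamily would gain a single new branch:
//   if (model.includes("mistral")) return "mistral";
```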
diff --git a/notes/model-wire.md b/notes/model-wire.md index ca17ab7..58a5f14 100644 --- a/notes/model-wire.md +++ b/notes/model-wire.md @@ -5,15 +5,23 @@ This document captures the differences in how various LLM models handle structur ## OpenAI (GPT-4o) ### JSON Schema Support -- **Fully supports** the JSON schema format + +- **Fully supports** the JSON schema format - Returns clean, valid JSON without any explanatory text - Properly respects the schema structure including required fields and types - Example response content: ```json - {"title":"Where the Crawdads Sing","author":"Delia Owens","year":2018,"genre":"Mystery, Coming-of-age","rating":4.8} + { + "title": "Where the Crawdads Sing", + "author": "Delia Owens", + "year": 2018, + "genre": "Mystery, Coming-of-age", + "rating": 4.8 + } ``` ### Streaming + - Streams the output token by token - Each chunk contains a small part of the JSON string - First chunk initializes the structure `{"`, then builds the JSON incrementally @@ -31,9 +39,11 @@ This document captures the differences in how various LLM models handle structur ## Claude (Claude 3 Sonnet) ### JSON Schema Support + - **Partial support** for the JSON schema format - When using the `json_schema` parameter, Claude often adds explanatory text - Example response with schema: + ``` Sure, here's a short book recommendation in the requested format: @@ -42,9 +52,11 @@ This document captures the differences in how various LLM models handle structur Genre: Fiction, Allegorical novel Description: "The Alchemist" by Paulo Coelho is a beautiful and inspiring story... ``` + - The response doesn't follow the JSON schema format and includes extra information. ### System Message Approach + - **Works well** with the system message approach - Returns clean, valid JSON when instructed via the system message - Example system message response: @@ -61,6 +73,7 @@ This document captures the differences in how various LLM models handle structur ## Gemini (Gemini 2.0 Flash) ### JSON Schema Support + - **Fully supports** the JSON schema format - Returns clean, valid JSON without any explanatory text - Properly follows the schema constraints for fields and types @@ -76,9 +89,11 @@ This document captures the differences in how various LLM models handle structur ``` ### System Message Approach + - **Works well** but adds code fences around the JSON - Returns code-fenced JSON when instructed via system message: - ``` + + ```` ```json { "title": "The Martian", @@ -87,25 +102,30 @@ This document captures the differences in how various LLM models handle structur "genre": "Science Fiction", "rating": 5 } + ```` + ``` + ``` ## Llama3 (Meta Llama 3.3 70B Instruct) ### JSON Schema Support + - **Does not properly support** the JSON schema format - Returns markdown-formatted text descriptions instead of JSON - Ignores the JSON schema structure and provides detailed text explanations - Example response: ``` - **Title:** "The Hitchhiker's Guide to the Galaxy" - **Author:** Douglas Adams - **Genre:** Science Fiction, Comedy - **Description:** An comedic adventure through space following the misadventures of an unwitting human and his alien friend after Earth's destruction. + **Title:** "The Hitchhiker's Guide to the Galaxy" + **Author:** Douglas Adams + **Genre:** Science Fiction, Comedy + **Description:** An comedic adventure through space following the misadventures of an unwitting human and his alien friend after Earth's destruction. 
**Why Read:** Unique blend of humor and science fiction, with witty observations on human society and the universe. ``` ### System Message Approach + - **Works well** with the system message approach - Returns clean, valid JSON when instructed via system message - Example system message response: @@ -122,27 +142,29 @@ This document captures the differences in how various LLM models handle structur ## DeepSeek (DeepSeek Chat) ### JSON Schema Support + - **Does not properly support** the JSON schema format - Similar to Llama3, returns markdown-formatted text descriptions - Ignores the JSON schema structure and provides text explanations - Example response: ``` - **Title:** *The Alchemist* - **Author:** Paulo Coelho - **Genre:** Fiction, Inspirational + **Title:** *The Alchemist* + **Author:** Paulo Coelho + **Genre:** Fiction, Inspirational **Why I Recommend It:** A timeless tale of self-discovery and pursuing one's dreams, *The Alchemist* is both simple and profound. Its allegorical style and universal themes make it a quick yet impactful read, perfect for anyone seeking motivation or a fresh perspective on life. ``` ### System Message Approach + - **Works well** with the system message approach - Returns clean, valid JSON when instructed via system message - Example system message response: ```json { - "title": "The Great Gatsby", - "author": "F. Scott Fitzgerald", - "year": 1925, - "genre": "Tragedy", + "title": "The Great Gatsby", + "author": "F. Scott Fitzgerald", + "year": 1925, + "genre": "Tragedy", "rating": 4.5 } ``` @@ -150,6 +172,7 @@ This document captures the differences in how various LLM models handle structur ## GPT-4 Turbo (OpenAI GPT-4 Turbo) ### JSON Schema Support + - **Does not support** the JSON schema format - Returns an error when response_format.json_schema is used: ``` @@ -157,6 +180,7 @@ This document captures the differences in how various LLM models handle structur ``` ### System Message Approach + - **Works very well** with the system message approach - Returns clean, properly formatted JSON when instructed via system message - Example system message response: @@ -191,6 +215,7 @@ This document captures the differences in how various LLM models handle structur ## Library Implementation Our library should: + 1. Detect the model type from the model string 2. For Claude, Llama3, DeepSeek, and GPT-4 Turbo: Add fallback to system message approach when schema is requested 3. Handle response post-processing based on model type: @@ -204,7 +229,9 @@ Our library should: ## Implementation Details for Fixing Integration Tests ### Current Failures + We have tests failing for the following reasons: + 1. Llama3, DeepSeek and GPT-4 Turbo models return markdown-formatted text or errors when using JSON schema format 2. Our implementation doesn't automatically use system message approach for these models @@ -215,95 +242,93 @@ We have tests failing for the following reasons: ```typescript function prepareRequestParams( prompt: string | Message[], - options: CallAIOptions = {} -): { endpoint: string, requestOptions: RequestInit } { + options: CallAIOptions = {}, +): { endpoint: string; requestOptions: RequestInit } { // ... existing code ... - + // Detect model type const isClaudeModel = options.model ? /claude/i.test(options.model) : false; const isGeminiModel = options.model ? /gemini/i.test(options.model) : false; const isLlama3Model = options.model ? /llama-3/i.test(options.model) : false; const isDeepSeekModel = options.model ? 
/deepseek/i.test(options.model) : false; const isGPT4TurboModel = options.model ? /gpt-4-turbo/i.test(options.model) : false; - + // Models that should use system message approach for structured output const useSystemMessageApproach = isClaudeModel || isLlama3Model || isDeepSeekModel || isGPT4TurboModel; - + // Prepare messages - let messages: Message[] = []; - + let messages: Message[] = []; + if (Array.isArray(prompt)) { messages = prompt; } else { // Create a single message - messages = [{ role: 'user', content: prompt as string }]; + messages = [{ role: "user", content: prompt as string }]; } - + // Handle schema for different models if (options.schema) { if (useSystemMessageApproach || options.forceSystemMessage) { // Use system message approach for models that need it const schemaProperties = Object.entries(options.schema.properties || {}) .map(([key, value]) => { - const type = (value as any).type || 'string'; + const type = (value as any).type || "string"; return ` "${key}": ${type}`; }) - .join(',\n'); - + .join(",\n"); + const systemMessage: Message = { - role: 'system', - content: `Please return your response as JSON following this schema exactly:\n{\n${schemaProperties}\n}\nDo not include any explanation or text outside of the JSON object.` + role: "system", + content: `Please return your response as JSON following this schema exactly:\n{\n${schemaProperties}\n}\nDo not include any explanation or text outside of the JSON object.`, }; - + // Add system message at the beginning if none exists - if (!messages.some(m => m.role === 'system')) { + if (!messages.some((m) => m.role === "system")) { messages = [systemMessage, ...messages]; } } else { // For OpenAI GPT-4o and Gemini, use the schema format requestParams.response_format = { - type: 'json_schema', + type: "json_schema", json_schema: { - name: options.schema.name || 'response', + name: options.schema.name || "response", schema: { - type: 'object', + type: "object", properties: options.schema.properties || {}, required: options.schema.required || Object.keys(options.schema.properties || {}), - additionalProperties: options.schema.additionalProperties !== undefined - ? options.schema.additionalProperties - : false, - } - } + additionalProperties: options.schema.additionalProperties !== undefined ? options.schema.additionalProperties : false, + }, + }, }; } } - + // ... rest of the function ... } ``` 2. **Update response handling to detect and process model-specific formats**: -```typescript +````typescript async function processResponseContent(content: string, options: CallAIOptions = {}): Promise { // Detect model type const isClaudeModel = options.model ? /claude/i.test(options.model) : false; const isGeminiModel = options.model ? /gemini/i.test(options.model) : false; const isLlama3Model = options.model ? /llama-3/i.test(options.model) : false; const isDeepSeekModel = options.model ? 
/deepseek/i.test(options.model) : false; - + // For models that might return formatted text instead of JSON const needsJsonExtraction = isClaudeModel || isGeminiModel || isLlama3Model || isDeepSeekModel; - + if (needsJsonExtraction && options.schema) { // Try to extract JSON from content if it might be wrapped - const jsonMatch = content.match(/```json\s*([\s\S]*?)\s*```/) || - content.match(/```\s*([\s\S]*?)\s*```/) || - content.match(/\{[\s\S]*\}/) || - [null, content]; - + const jsonMatch = content.match(/```json\s*([\s\S]*?)\s*```/) || + content.match(/```\s*([\s\S]*?)\s*```/) || + content.match(/\{[\s\S]*\}/) || [null, content]; + return jsonMatch[1] || content; } - + return content; -} \ No newline at end of file +} +```` diff --git a/notes/rekey-impl.md b/notes/rekey-impl.md index a2d0ac3..b6e02b7 100644 --- a/notes/rekey-impl.md +++ b/notes/rekey-impl.md @@ -22,7 +22,7 @@ Update the type definitions to include a bypass flag for key refresh: // In src/types.ts export interface CallAIOptions { // Existing options... - + /** * Skip key refresh on 4xx errors * Useful for testing error conditions or when you want to handle refresh manually @@ -44,23 +44,23 @@ CALL_AI_REFRESH_TOKEN=use-vibes # Default auth token ```typescript async function refreshApiKey( - currentKey: string | null, + currentKey: string | null, endpoint: string, - refreshToken: string | null + refreshToken: string | null, ): Promise<{ apiKey: string; topup: boolean }> { try { // Prepare headers with authentication const headers: Record = { - 'Content-Type': 'application/json' + "Content-Type": "application/json", }; - + // Use the refresh token for authentication if (refreshToken) { - headers['Authorization'] = `Bearer ${refreshToken}`; + headers["Authorization"] = `Bearer ${refreshToken}`; } else { throw new Error("Refresh token is required for key creation"); } - + // Extract hash from current key if available (for potential future top-up capability) let keyHash = null; if (currentKey) { @@ -73,33 +73,33 @@ async function refreshApiKey( console.warn("Could not extract hash from current key, will create new key"); } } - + // Determine if this might be a top-up request based on available hash const isTopupAttempt = Boolean(keyHash); - + // Create the request body const requestBody: any = { - userId: 'anonymous', // Replace with actual user ID if available - name: 'Session Key', - label: `session-${Date.now()}` + userId: "anonymous", // Replace with actual user ID if available + name: "Session Key", + label: `session-${Date.now()}`, }; - + // If we have a key hash and want to attempt top-up (for future implementation) if (isTopupAttempt) { requestBody.keyHash = keyHash; - requestBody.action = 'topup'; // Signal that we're trying to top up existing key + requestBody.action = "topup"; // Signal that we're trying to top up existing key } - + // Append the specific API path to the base URL endpoint const fullEndpointUrl = `${endpoint}/api/keys`; - + // Make request to refresh endpoint const response = await fetch(fullEndpointUrl, { - method: 'POST', + method: "POST", headers, - body: JSON.stringify(requestBody) + body: JSON.stringify(requestBody), }); - + if (!response.ok) { // Check for specific error situations if (response.status === 401 || response.status === 403) { @@ -107,30 +107,28 @@ async function refreshApiKey( refreshTokenError.name = "RefreshTokenError"; throw refreshTokenError; } - + const errorData = await response.json(); - throw new Error( - `Failed to refresh key: ${errorData.error || response.statusText}` - 
); + throw new Error(`Failed to refresh key: ${errorData.error || response.statusText}`); } - + // Parse the response const data = await response.json(); - + // Extract the key and relevant metadata if (!data.key) { - throw new Error('API key not found in refresh response'); + throw new Error("API key not found in refresh response"); } - + // Store the key metadata for potential future use // This would allow extracting the hash later for top-up attempts storeKeyMetadata(data); - + // For now, always return with topup=false since the backend doesn't support topup yet // When topup is implemented on the backend, this can be updated return { apiKey: data.key, - topup: false // Will be true when backend supports top-up feature + topup: false, // Will be true when backend supports top-up feature }; } catch (error) { // Re-throw refresh token errors with specific type @@ -155,7 +153,7 @@ function storeKeyMetadata(data: any): void { if (!keyStore.metadata) { keyStore.metadata = {}; } - + // Store the metadata with the key as the index keyStore.metadata[data.key] = { hash: data.hash, @@ -164,7 +162,7 @@ function storeKeyMetadata(data: any): void { limit: data.limit, usage: data.usage, created_at: data.created_at, - updated_at: data.updated_at + updated_at: data.updated_at, }; } ``` @@ -179,7 +177,7 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions keyStore.isRefreshing = true; const result = await refreshApiKey(null, keyStore.refreshEndpoint, keyStore.refreshToken); keyStore.current = result.apiKey; - + // Update environment variables/globals with the new key if (typeof process !== "undefined" && process.env) { process.env.CALLAI_API_KEY = result.apiKey; @@ -187,7 +185,7 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions if (typeof window !== "undefined") { (window as any).CALLAI_API_KEY = result.apiKey; } - + // Now we have a key, so continue with the call } catch (initialKeyError) { console.error("Failed to get initial API key:", initialKeyError); @@ -205,21 +203,21 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions if (options.skipRefresh) { throw error; } - + // Check for 4xx error which might indicate an expired/invalid key const needsNewKey = isNewKeyError(error, options.debug || false); - + // Only attempt retry if we have a refreshEndpoint and either we need a new key or we have no key if (keyStore.refreshEndpoint && (needsNewKey || (!options.apiKey && !keyStore.current))) { // Attempt to refresh the key through handleApiError try { // This will throw if the refresh fails or can't be attempted - await handleApiError(error, 'callAi', options.debug || false, { + await handleApiError(error, "callAi", options.debug || false, { apiKey: options.apiKey || keyStore.current, endpoint: options.endpoint, - skipRefresh: options.skipRefresh + skipRefresh: options.skipRefresh, }); - + // If we reach here, key refresh was successful - retry with potentially new key const retryOptions = { ...options, apiKey: keyStore.current }; return await callAIInternal(prompt, retryOptions); @@ -228,7 +226,7 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions throw error; } } - + // For other errors, just throw throw error; } @@ -242,53 +240,53 @@ async function handleApiError( error: any, context: string, debug: boolean = false, - options: { apiKey?: string; endpoint?: string; skipRefresh?: boolean } = {} + options: { apiKey?: string; endpoint?: string; skipRefresh?: boolean } = {}, ): 
Promise { if (debug) { console.error(`[callAi:${context}]:`, error); } - + // Skip key refresh if explicitly requested if (options.skipRefresh) { throw new Error(`${context}: ${String(error)}`); } - + // Check if this error indicates we need a new key const needsNewKey = isNewKeyError(error, debug); const noKey = !options.apiKey && !keyStore.current; - + // Try to refresh key if (we need a new key OR we have no key) AND refreshEndpoint is configured if ((needsNewKey || noKey) && keyStore.refreshEndpoint) { // Don't try to refresh if we've tried too recently (unless we have no key at all) const now = Date.now(); const minRefreshInterval = 5000; // 5 seconds - - if (!keyStore.isRefreshing && (noKey || (now - keyStore.lastRefreshAttempt) > minRefreshInterval)) { + + if (!keyStore.isRefreshing && (noKey || now - keyStore.lastRefreshAttempt > minRefreshInterval)) { try { keyStore.isRefreshing = true; keyStore.lastRefreshAttempt = now; - + // Call refresh endpoint - pass current key if we have one const currentKey = options.apiKey || keyStore.current; const result = await refreshApiKey(currentKey, keyStore.refreshEndpoint, keyStore.refreshToken); - + // If the server indicated this is a top-up (and we already have a key), keep using our current key // Otherwise use the new key that was returned if (!result.topup) { // Update the key in our store with the new key keyStore.current = result.apiKey; - + // If we're in a Node.js environment, also update process.env if (typeof process !== "undefined" && process.env) { process.env.CALLAI_API_KEY = result.apiKey; } - + // If we're in a browser, also update window if (typeof window !== "undefined") { (window as any).CALLAI_API_KEY = result.apiKey; } } - + // Signal that key refresh was attempted (whether top-up or new key) return; // This will allow the caller to retry } catch (refreshError) { @@ -299,7 +297,7 @@ async function handleApiError( } } } - + // If we reach here, either key refresh failed or wasn't attempted throw new Error(`${context}: ${String(error)}`); } @@ -313,15 +311,15 @@ const keyStore = { // Default key from environment or config current: null as string | null, // The refresh endpoint URL - defaults to vibecode.garden - refreshEndpoint: 'https://vibecode.garden' as string | null, + refreshEndpoint: "https://vibecode.garden" as string | null, // Authentication token for refresh endpoint - defaults to use-vibes - refreshToken: 'use-vibes' as string | null, + refreshToken: "use-vibes" as string | null, // Flag to prevent concurrent refresh attempts isRefreshing: false, // Timestamp of last refresh attempt (to prevent too frequent refreshes) lastRefreshAttempt: 0, // Storage for key metadata (useful for future top-up implementation) - metadata: {} as Record + metadata: {} as Record, }; ``` @@ -336,9 +334,9 @@ async function handleTopupKey(requestData, provisioningKey) { const { keyHash, additionalAmount = 1.0 } = requestData; if (!keyHash) { - return new Response(JSON.stringify({ error: 'Key hash is required' }), { + return new Response(JSON.stringify({ error: "Key hash is required" }), { status: 400, - headers: { 'Content-Type': 'application/json' }, + headers: { "Content-Type": "application/json" }, }); } @@ -346,17 +344,20 @@ async function handleTopupKey(requestData, provisioningKey) { // This would depend on OpenRouter's API capabilities // Return success response with the same key but updated limits - return new Response(JSON.stringify({ - topup: true, - // Include other key metadata - }), { - status: 200, - headers: { 
'Content-Type': 'application/json' }, - }); + return new Response( + JSON.stringify({ + topup: true, + // Include other key metadata + }), + { + status: 200, + headers: { "Content-Type": "application/json" }, + }, + ); } catch (error) { return new Response(JSON.stringify({ error: error.message }), { status: 500, - headers: { 'Content-Type': 'application/json' }, + headers: { "Content-Type": "application/json" }, }); } } @@ -372,4 +373,4 @@ async function handleTopupKey(requestData, provisioningKey) { 2. [ ] Implement `getHashFromKey` and `storeKeyMetadata` functions 3. [ ] Update `refreshApiKey` to work with the Netlify endpoint format 4. [ ] Test with the existing backend endpoint -5. [ ] Work with backend team to add top-up functionality if desired \ No newline at end of file +5. [ ] Work with backend team to add top-up functionality if desired diff --git a/notes/rekey.md b/notes/rekey.md index 3aff80c..b3afedb 100644 --- a/notes/rekey.md +++ b/notes/rekey.md @@ -28,13 +28,13 @@ Create utility functions to avoid duplication: * @returns True if the error suggests we need a new key */ export function isNewKeyError(error: any, debug: boolean = false): boolean { - const status = error?.status || error?.statusCode || (error?.response?.status); + const status = error?.status || error?.statusCode || error?.response?.status; const is4xx = status >= 400 && status < 500; - + if (is4xx && debug) { console.log(`[callAi:debug] Key error detected: status=${status}, message=${String(error)}`); } - + return is4xx; } ``` @@ -97,12 +97,12 @@ function prepareRequestParams( keyStore.current || (typeof window !== "undefined" ? (window as any).CALLAI_API_KEY : null) || (typeof process !== "undefined" && process.env ? process.env.CALLAI_API_KEY : null); - + // If key provided in options, update the store if (options.apiKey) { keyStore.current = options.apiKey; } - + // Rest of the existing function... 
} ``` @@ -116,48 +116,48 @@ async function handleApiError( error: any, context: string, debug: boolean = false, - options: { apiKey?: string; endpoint?: string } = {} + options: { apiKey?: string; endpoint?: string } = {}, ): Promise { if (debug) { console.error(`[callAi:${context}]:`, error); } - + // Check if this error indicates we need a new key const needsNewKey = isNewKeyError(error, debug); const noKey = !options.apiKey && !keyStore.current; - + // Try to refresh key if (we need a new key OR we have no key) AND refreshEndpoint is configured if ((needsNewKey || noKey) && keyStore.refreshEndpoint) { // Don't try to refresh if we've tried too recently (unless we have no key at all) const now = Date.now(); const minRefreshInterval = 5000; // 5 seconds - - if (!keyStore.isRefreshing && (noKey || (now - keyStore.lastRefreshAttempt) > minRefreshInterval)) { + + if (!keyStore.isRefreshing && (noKey || now - keyStore.lastRefreshAttempt > minRefreshInterval)) { try { keyStore.isRefreshing = true; keyStore.lastRefreshAttempt = now; - + // Call refresh endpoint - pass current key if we have one const currentKey = options.apiKey || keyStore.current; const result = await refreshApiKey(currentKey, keyStore.refreshEndpoint, keyStore.refreshToken); - + // If the server indicated this is a top-up (and we already have a key), keep using our current key // Otherwise use the new key that was returned if (!result.topup) { // Update the key in our store with the new key keyStore.current = result.apiKey; - + // If we're in a Node.js environment, also update process.env if (typeof process !== "undefined" && process.env) { process.env.CALLAI_API_KEY = result.apiKey; } - + // If we're in a browser, also update window if (typeof window !== "undefined") { (window as any).CALLAI_API_KEY = result.apiKey; } } - + // Signal that key refresh was attempted (whether top-up or new key) return; // This will allow the caller to retry } catch (refreshError) { @@ -168,7 +168,7 @@ async function handleApiError( } } } - + // If we reach here, either key refresh failed or wasn't attempted throw new Error(`${context}: ${String(error)}`); } @@ -185,48 +185,48 @@ async function handleApiError( * @returns Object containing the API key and topup flag */ async function refreshApiKey( - currentKey: string | null, + currentKey: string | null, endpoint: string, - refreshToken: string | null + refreshToken: string | null, ): Promise<{ apiKey: string; topup: boolean }> { try { // Prepare headers with authentication const headers: Record = { - 'Content-Type': 'application/json' + "Content-Type": "application/json", }; - + // Use the refresh token for authentication if available, otherwise use the current key if (refreshToken) { - headers['Authorization'] = `Bearer ${refreshToken}`; + headers["Authorization"] = `Bearer ${refreshToken}`; } else if (currentKey) { - headers['Authorization'] = `Bearer ${currentKey}`; + headers["Authorization"] = `Bearer ${currentKey}`; } - + // Make request to refresh endpoint const response = await fetch(endpoint, { - method: 'POST', + method: "POST", headers, - body: JSON.stringify({ - action: 'refresh', - currentKey: currentKey || undefined // only send if we have one - }) + body: JSON.stringify({ + action: "refresh", + currentKey: currentKey || undefined, // only send if we have one + }), }); - + if (!response.ok) { throw new Error(`Refresh failed: ${response.status} ${response.statusText}`); } - + const data = await response.json(); - + // Check for required fields in the response if (!data.apiKey) { - 
throw new Error('API key not found in refresh response'); + throw new Error("API key not found in refresh response"); } - + // Return both the API key and whether this was a top-up return { apiKey: data.apiKey, - topup: Boolean(data.topup) // convert to boolean in case it's truthy but not boolean + topup: Boolean(data.topup), // convert to boolean in case it's truthy but not boolean }; } catch (error) { throw new Error(`Key refresh failed: ${String(error)}`); @@ -246,7 +246,7 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions keyStore.isRefreshing = true; const result = await refreshApiKey(null, keyStore.refreshEndpoint, keyStore.refreshToken); keyStore.current = result.apiKey; - + // Update environment variables/globals with the new key if (typeof process !== "undefined" && process.env) { process.env.CALLAI_API_KEY = result.apiKey; @@ -254,7 +254,7 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions if (typeof window !== "undefined") { (window as any).CALLAI_API_KEY = result.apiKey; } - + // Now we have a key, so continue with the call } catch (initialKeyError) { console.error("Failed to get initial API key:", initialKeyError); @@ -270,17 +270,17 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions } catch (error) { // Check if we need a new key const needsNewKey = isNewKeyError(error, options.debug || false); - + // Only attempt retry if we have a refreshEndpoint and either we need a new key or we have no key if (keyStore.refreshEndpoint && (needsNewKey || (!options.apiKey && !keyStore.current))) { // Attempt to refresh the key through handleApiError try { // This will throw if the refresh fails or can't be attempted - await handleApiError(error, 'callAi', options.debug || false, { + await handleApiError(error, "callAi", options.debug || false, { apiKey: options.apiKey || keyStore.current, - endpoint: options.endpoint + endpoint: options.endpoint, }); - + // If we reach here, key refresh was successful - retry with potentially new key const retryOptions = { ...options, apiKey: keyStore.current }; return await callAIInternal(prompt, retryOptions); @@ -289,7 +289,7 @@ export async function callAi(prompt: string | Message[], options: CallAIOptions throw error; } } - + // For other errors, just throw throw error; } @@ -313,13 +313,14 @@ CALL_AI_REFRESH_TOKEN=your-refresh-service-authentication-token ``` The variables serve these purposes: + - `CALLAI_API_KEY`: The API key for making AI calls - `CALLAI_REFRESH_ENDPOINT`: The endpoint to call when refreshing keys - `CALL_AI_REFRESH_TOKEN`: Authentication token for the key refresh service (separate from the API key) **Important Note on Refresh Token Management:** -The `CALL_AI_REFRESH_TOKEN` will be managed and refreshed by the owning page/application on a schedule we cannot predict. If the refresh token itself expires or becomes invalid, we need to clearly communicate this to the caller so they can handle refresh token renewal. +The `CALL_AI_REFRESH_TOKEN` will be managed and refreshed by the owning page/application on a schedule we cannot predict. If the refresh token itself expires or becomes invalid, we need to clearly communicate this to the caller so they can handle refresh token renewal. 
When the refresh endpoint returns a 401/403 error specifically indicating the refresh token is invalid (rather than the API key), we should throw a distinct error type that allows the caller to recognize this specific situation: @@ -342,4 +343,3 @@ This allows applications to implement their own refresh token renewal logic when 3. Updated keys are stored and used for future calls 4. No instance state, everything is module-level 5. Only one endpoint/keyset at a time (as specified) - \ No newline at end of file diff --git a/notes/release-process.md b/notes/release-process.md index 2c35ab8..a9371e3 100644 --- a/notes/release-process.md +++ b/notes/release-process.md @@ -12,9 +12,11 @@ This document outlines the steps to create a new release of the call-ai package. ## Release Steps 1. Update version in package.json: + ```bash npm version patch|minor|major ``` + - Use `patch` for bug fixes (0.x.Y) - Use `minor` for new features (0.X.0) - Use `major` for breaking changes (X.0.0) @@ -25,6 +27,7 @@ This document outlines the steps to create a new release of the call-ai package. - Create a new git tag 3. Push the changes and the tag: + ```bash git push && git push --tags ``` @@ -43,6 +46,7 @@ This document outlines the steps to create a new release of the call-ai package. ## Troubleshooting If the CI process fails: + 1. Check the GitHub Actions logs for details 2. Make necessary fixes 3. Delete the failed tag locally and remotely: @@ -50,4 +54,4 @@ If the CI process fails: git tag -d v0.x.y git push --delete origin v0.x.y ``` -4. Start the process again once the issues are fixed \ No newline at end of file +4. Start the process again once the issues are fixed diff --git a/notes/split-api.md b/notes/split-api.md index d032029..8353354 100644 --- a/notes/split-api.md +++ b/notes/split-api.md @@ -22,7 +22,7 @@ The refactoring got stuck on two main issues: // This test passes with the original implementation but fails with our refactored version it("should handle errors during API call for streaming", async () => { (global.fetch as jest.Mock).mockRejectedValue(new Error("Network error")); - + try { const options = { apiKey: "test-api-key", stream: true }; await callAi("Hello", options); diff --git a/notes/test-list.md b/notes/test-list.md index 5a0dc09..2bf33cd 100644 --- a/notes/test-list.md +++ b/notes/test-list.md @@ -1,18 +1,24 @@ # Test List ## schema-result.test.ts + Tests structured data responses using schema definitions with different AI models. + - **OpenAI schema test**: ✅ PASS - Successfully returns valid structured data with schema - **Claude schema test**: ❌ FAIL - Timeout after 30 seconds - **OpenAI tool mode test**: ✅ PASS - Successfully supports tool mode when enabled ## schema-handling.test.ts + Tests schema handling strategies across different AI models. + - **Schema implementation method test**: ❌ FAIL - Error reading json property from undefined - **Model-specific schema strategies test**: ✅ PASS - All models successfully use their optimal schema approach by default ## callai.integration.test.ts + Integration tests across multiple AI providers with various request types. + - **OpenAI streaming tests**: ✅ PASS - Successfully streams structured data responses - **Claude tool mode tests**: ❌ PARTIAL - Warning about streaming with tool mode - **DeepSeek tests**: ✅ PASS - Successfully returns structured data @@ -22,7 +28,9 @@ Integration tests across multiple AI providers with various request types. 
- **Various model system message tests**: ✅ PASS - All tested models handle system messages correctly ## openai-wire.test.ts + Tests for OpenAI API wire protocol implementation. + - **JSON schema request formatting**: ✅ PASS - Correctly formats OpenAI JSON schema requests - **JSON schema response handling**: ✅ PASS - Correctly handles OpenAI responses with JSON schema - **JSON schema streaming**: ✅ PASS - Correctly handles OpenAI streaming with JSON schema @@ -30,13 +38,17 @@ Tests for OpenAI API wire protocol implementation. - **OpenAI tool mode support**: ✅ PASS - Supports tool mode for OpenAI models when enabled ## openai-tool-integration.test.ts + Tests integration of schema handling with OpenAI and Claude APIs. + - **Claude structured data test**: ❌ FAIL - Timeout after 30 seconds - **OpenAI structured data test**: ✅ PASS - Successfully returns structured data with schema - **OpenAI useToolMode test**: ✅ PASS - Successfully returns valid structured data with tool mode option ## unit.test.ts + Unit tests for the core functions of the callAi library. + - **API key requirement tests**: ✅ PASS - Properly handles API key requirements for both streaming and non-streaming - **Request parameter tests**: ✅ PASS - Correctly formats POST request parameters - **Schema handling tests**: ✅ PASS - Correctly processes various schema formats and structures @@ -44,7 +56,9 @@ Unit tests for the core functions of the callAi library. - **Streaming tests**: ✅ PASS - Correctly implements streaming functionality with schemas ## gemini-wire.test.ts + Tests for Gemini API wire protocol implementation. + - **JSON schema format test**: ✅ PASS - Uses JSON schema format by default for Gemini with schema - **Response handling test**: ✅ PASS - Correctly handles Gemini responses with schema - **System messages test**: ✅ PASS - Correctly passes through system messages @@ -52,7 +66,9 @@ Tests for Gemini API wire protocol implementation. - **Response format schema test**: ✅ PASS - Correctly handles schema when response_format is supported ## claude-wire.test.ts + Tests for Claude API wire protocol implementation. + - **System message approach test**: ❌ FAIL - Expected system message role but got user role - **Tool mode test**: ✅ PASS - Uses native tool mode with Claude for schema handling - **JSON response handling test**: ✅ PASS - Correctly handles Claude JSON responses @@ -61,26 +77,36 @@ Tests for Claude API wire protocol implementation. - **System message response test**: ✅ PASS - Correctly handles Claude responses with system message ## llama3-wire.test.ts + Tests for Llama3 API wire protocol implementation. + - **System message approach test**: ✅ PASS - Uses system message approach for Llama3 with schema - **Response handling test**: ✅ PASS - Correctly handles Llama3 responses with schema ## gpt4turbo-wire.test.ts + Tests for GPT-4 Turbo API wire protocol implementation. + - **System message approach test**: ✅ PASS - Uses system message approach for GPT-4 Turbo with schema - **Response handling test**: ✅ PASS - Correctly handles GPT-4 Turbo responses with schema ## deepseek-wire.test.ts + Tests for DeepSeek API wire protocol implementation. + - **System message approach test**: ✅ PASS - Uses system message approach for DeepSeek with schema - **Response handling test**: ✅ PASS - Correctly handles DeepSeek responses with schema ## openai-weather-wire.test.ts + Tests for OpenAI weather wire protocol implementation with complex schemas. 
+ - **JSON schema format test**: ✅ PASS - Successfully formats complex nested schema for weather forecasts ## fetch.integration.test.ts + Integration tests for direct API calls using fetch. + - **OpenRouter schema format test**: ✅ PASS - Validates exact OpenRouter schema format - **OpenAI structured output test**: ✅ PASS - Correctly formats schema for OpenAI structured output - **Schema formatting debug test**: ✅ PASS - Debugs exact schema format sent to OpenRouter @@ -92,5 +118,7 @@ Integration tests for direct API calls using fetch. - **Gemini JSON schema test**: ✅ PASS - Successfully uses JSON schema format with Google Gemini ## claude-tool-test.js and claude-tool-direct.js + Manual test scripts for Claude tool mode direct API calls. + - **Not Jest test files**: ⚠️ NOTE - These are manual test scripts, not automated tests diff --git a/notes/testing.md b/notes/testing.md index cc4c6c4..cca42e7 100644 --- a/notes/testing.md +++ b/notes/testing.md @@ -14,6 +14,7 @@ npx jest test/callai.integration.test.ts -t "should handle streaming with gpt4tu ``` Benefits: + - Faster feedback loop - Clearer error messages - Easier to analyze one problem at a time @@ -31,10 +32,11 @@ console.log(`[DEBUG] Response status:`, response.status); console.log(`[DEBUG] Response headers:`, Object.fromEntries([...response.headers.entries()])); // For streaming responses, log each chunk -console.log(`[DEBUG] Raw chunk #${rawChunkCount}:`, chunk.substring(0, 100) + (chunk.length > 100 ? '...' : '')); +console.log(`[DEBUG] Raw chunk #${rawChunkCount}:`, chunk.substring(0, 100) + (chunk.length > 100 ? "..." : "")); ``` Look for: + - What exact request are we sending? - What exact response are we receiving? - For streaming, what are the individual chunks? @@ -49,6 +51,7 @@ for i in {1..3}; do npx jest test/callai.integration.test.ts -t "should handle s ``` This helps distinguish between: + - Genuine code issues - Transient API issues - Model inconsistency in responses @@ -85,6 +88,7 @@ AI model responses are inherently non-deterministic, which can lead to flaky tes ### Comprehensive Test Coverage Ensure tests cover: + - Different models (OpenAI, Claude, Gemini, etc.) - Different modes (streaming vs. non-streaming) - Various schema complexities (simple, nested, arrays) @@ -93,8 +97,9 @@ Ensure tests cover: ## Test Automation Consider implementing: + - Nightly full test runs against actual APIs - Limited test suites for PR validation - Mocked responses for unit tests to avoid API costs -By following these practices, we can maintain a reliable test suite despite the inherent variability of AI model responses. \ No newline at end of file +By following these practices, we can maintain a reliable test suite despite the inherent variability of AI model responses. 
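The shell loop above reruns a whole test file; the same idea can also live inside a test as a small retry wrapper, so one transient API failure does not fail the suite. A minimal sketch under stated assumptions — the helper name and retry policy are illustrative, not part of the repo's test utilities:

```typescript
// Hypothetical retry wrapper for flaky live-API integration tests.
// Retries the body, logging each failure, and only throws after the
// final attempt so genuine regressions still surface.
async function withRetries<T>(body: () => Promise<T>, attempts = 3): Promise<T> {
  let lastError: unknown;
  for (let attempt = 1; attempt <= attempts; attempt++) {
    try {
      return await body();
    } catch (error) {
      lastError = error;
      console.warn(`Attempt ${attempt}/${attempts} failed:`, error);
    }
  }
  throw lastError;
}
```

Reserve this for integration tests against live models; unit tests with mocked responses should stay strict so real regressions fail immediately.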
diff --git a/package.json b/package.json index a4dc6f7..d51f22d 100644 --- a/package.json +++ b/package.json @@ -1,13 +1,12 @@ { - "name": "call-ai", - "version": "0.10.2", + "name": "call-ai-monorepo", + "version": "0.0.0", + "private": true, "description": "Lightweight library for making AI API calls with streaming support", - "main": "dist/index.js", - "browser": "dist/index.js", - "types": "dist/index.d.ts", - "files": [ - "dist" - ], + "type": "module", + "engines": { + "node": ">=22" + }, "repository": { "type": "git", "url": "https://github.com/fireproof-storage/call-ai.git" @@ -18,14 +17,12 @@ }, "scripts": { "build": "tsc", - "test": "jest --testMatch=\"**/*unit*.test.ts\"", - "test:integration": "jest simple.integration", + "lint": "eslint .", + "test": "vitest --run", + "test:integration": "vitest simple.integration", "test:all": "pnpm test && pnpm test:integration", - "prepublishOnly": "npm run build", - "typecheck": "tsc --noEmit", - "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"", - "coverage": "jest --coverage", - "check": "npm run typecheck && npm run format && npm run test && npm run build" + "format": "prettier --write .", + "coverage": "vitest --coverage" }, "keywords": [ "ai", @@ -39,18 +36,13 @@ "author": "", "license": "MIT or Apache-2.0", "devDependencies": { - "@jest/globals": "^30.0.5", - "@types/jest": "^30.0.0", - "@types/node": "^24.0.15", - "@types/node-fetch": "^2.6.12", - "dotenv": "^17.2.0", - "jest": "^30.0.4", - "node-fetch": "^3.3.2", + "@eslint/js": "^9.31.0", + "@types/node": "^24.1.0", + "eslint": "^9.31.0", + "eslint-plugin-import": "^2.32.0", "prettier": "^3.5.3", - "ts-jest": "^29.1.1", - "typescript": "^5.1.6" - }, - "engines": { - "node": ">=14.0.0" + "typescript": "^5.8.3", + "typescript-eslint": "^8.37.0", + "vitest": "^3.2.4" } } diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml new file mode 100644 index 0000000..85084c1 --- /dev/null +++ b/pnpm-workspace.yaml @@ -0,0 +1,3 @@ +packages: + - "call-ai" + - "test" diff --git a/src/index.ts b/src/index.ts deleted file mode 100644 index 4a12257..0000000 --- a/src/index.ts +++ /dev/null @@ -1,14 +0,0 @@ -/** - * call-ai: A lightweight library for making AI API calls - */ - -// Export public types -export * from "./types"; - -// Export API functions -export { callAi, getMeta } from "./api"; -// Backward compatibility for callAI (uppercase AI) -export { callAi as callAI } from "./api"; - -// Export image generation function -export { imageGen } from "./image"; diff --git a/src/strategies/index.ts b/src/strategies/index.ts deleted file mode 100644 index e4ee459..0000000 --- a/src/strategies/index.ts +++ /dev/null @@ -1,5 +0,0 @@ -/** - * Strategy exports - */ -export * from "./model-strategies"; -export * from "./strategy-selector"; diff --git a/src/types.ts b/src/types.ts deleted file mode 100644 index 00ac6ae..0000000 --- a/src/types.ts +++ /dev/null @@ -1,265 +0,0 @@ -/** - * Type definitions for call-ai - */ - -/** - * Content types for multimodal messages - */ -export type ContentItem = { - type: "text" | "image_url"; - text?: string; - image_url?: { - url: string; - }; -}; - -/** - * Message type supporting both simple string content and multimodal content - */ -export type Message = { - role: "user" | "system" | "assistant"; - content: string | ContentItem[]; -}; - -/** - * Metadata associated with a response - * Available through the getMeta() helper function - */ -export interface ResponseMeta { - /** - * The model used for the response - */ - model: string; - - /** - * 
Timing information about the request - */ - timing?: { - startTime: number; - endTime?: number; - duration?: number; - }; - - /** - * Raw response data from the fetch call - * Contains the parsed JSON result from the API call - */ - rawResponse?: any; -} - -export interface Schema { - /** - * Optional schema name - will be sent to OpenRouter if provided - * If not specified, defaults to "result" - */ - name?: string; - /** - * Properties defining the structure of your schema - */ - properties: Record<string, any>; - /** - * Fields that are required in the response (defaults to all properties) - */ - required?: string[]; - /** - * Whether to allow fields not defined in properties (defaults to false) - */ - additionalProperties?: boolean; - /** - * Any additional schema properties to pass through - */ - [key: string]: any; -} - -/** - * Strategy interface for handling different model types - */ -export interface ModelStrategy { - name: string; - prepareRequest: (schema: Schema | null, messages: Message[]) => any; - processResponse: (content: string | any) => string; - shouldForceStream?: boolean; -} - -/** - * Schema strategies for different model types - */ -export type SchemaStrategyType = - | "json_schema" - | "tool_mode" - | "system_message" - | "none"; - -/** - * Strategy selection result - */ -export interface SchemaStrategy { - strategy: SchemaStrategyType; - model: string; - prepareRequest: ModelStrategy["prepareRequest"]; - processResponse: ModelStrategy["processResponse"]; - shouldForceStream: boolean; -} - -/** - * Return type for streaming API calls - */ -export type StreamResponse = AsyncGenerator; - -/** - * @internal - * Internal type for backward compatibility with v0.6.x - * This type is not exposed in public API documentation - */ -export type ThenableStreamResponse = AsyncGenerator & - Promise<string>; - -export interface CallAIOptions { - /** - * API key for authentication - */ - apiKey?: string; - - /** - * Model ID to use for the request - */ - model?: string; - - /** - * API endpoint to send the request to - */ - endpoint?: string; - - /** - * Custom origin for chat API - * Can also be set via window.CALLAI_CHAT_URL or process.env.CALLAI_CHAT_URL - */ - chatUrl?: string; - - /** - * Whether to stream the response - */ - stream?: boolean; - - /** - * Authentication token for key refresh service - * Can also be set via window.CALL_AI_REFRESH_TOKEN, process.env.CALL_AI_REFRESH_TOKEN, or default to "use-vibes" - */ - refreshToken?: string; - - /** - * Callback function to update refresh token when current token fails - * Gets called with the current failing token and should return a new token - * @param currentToken The current refresh token that failed - * @returns A Promise that resolves to a new refresh token - */ - updateRefreshToken?: (currentToken: string) => Promise<string>; - - /** - * Schema for structured output - */ - schema?: Schema | null; - - /** - * Modalities to enable in the response (e.g., ["image", "text"]) - * Used for multimodal models that can generate images - */ - modalities?: string[]; - - /** - * Whether to skip retry with fallback model when model errors occur - * Useful in testing and cases where retries should be suppressed - */ - skipRetry?: boolean; - - /** - * Skip key refresh on 4xx errors - * Useful for testing error conditions or when you want to handle refresh manually - */ - skipRefresh?: boolean; - - /** - * Enable raw response logging without any filtering or processing - */ - debug?: boolean; - - /** - * Any additional options to pass to the API - */ - [key: 
string]: any; -} - -export interface AIResponse { - text: string; - usage?: { - promptTokens: number; - completionTokens: number; - totalTokens: number; - }; - model: string; -} - -/** - * Response from image generation API - */ -export interface ImageResponse { - created: number; - data: { - b64_json: string; - url?: string; - revised_prompt?: string; - }[]; -} - -/** - * Options for image generation - */ -export interface ImageGenOptions { - /** - * API key for authentication - * Defaults to "VIBES_DIY" - */ - apiKey?: string; - - /** - * Model to use for image generation - * Defaults to "gpt-image-1" - */ - model?: string; - - /** - * Size of the generated image - */ - size?: string; - - /** - * Quality of the generated image - */ - quality?: string; - - /** - * Style of the generated image - */ - style?: string; - - /** - * For image editing: array of File objects to be edited - */ - images?: File[]; - - /** - * Custom base URL for the image generation API - * Can also be set via window.CALLAI_IMG_URL or process.env.CALLAI_IMG_URL - */ - imgUrl?: string; - - /** - * Enable debug logging - */ - debug?: boolean; -} - -/** - * @deprecated Use ImageGenOptions instead - */ -export interface ImageEditOptions extends ImageGenOptions {} diff --git a/src/utils.ts b/src/utils.ts deleted file mode 100644 index c0ddff1..0000000 --- a/src/utils.ts +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Utility functions for call-ai - */ - -/** - * Recursively adds additionalProperties: false to all object types in a schema - * This is needed for OpenAI's strict schema validation in streaming mode - */ -export function recursivelyAddAdditionalProperties(schema: any): any { - // Clone to avoid modifying the original - const result = { ...schema }; - - // If this is an object type, ensure it has additionalProperties: false - if (result.type === "object") { - // Set additionalProperties if not already set - if (result.additionalProperties === undefined) { - result.additionalProperties = false; - } - - // Process nested properties if they exist - if (result.properties) { - result.properties = { ...result.properties }; - - // Set required if not already set - OpenAI requires this for all nested objects - if (result.required === undefined) { - result.required = Object.keys(result.properties); - } - - // Check each property - Object.keys(result.properties).forEach((key) => { - const prop = result.properties[key]; - - // If property is an object or array type, recursively process it - if (prop && typeof prop === "object") { - result.properties[key] = recursivelyAddAdditionalProperties(prop); - - // For nested objects, ensure they also have all properties in their required field - if (prop.type === "object" && prop.properties) { - prop.required = Object.keys(prop.properties); - } - } - }); - } - } - - // Handle nested objects in arrays - if ( - result.type === "array" && - result.items && - typeof result.items === "object" - ) { - result.items = recursivelyAddAdditionalProperties(result.items); - - // If array items are objects, ensure they have all properties in required - if (result.items.type === "object" && result.items.properties) { - result.items.required = Object.keys(result.items.properties); - } - } - - return result; -} diff --git a/test/claude-json-test.ts b/test/claude-json-test.ts index 02558f4..089ed8b 100644 --- a/test/claude-json-test.ts +++ b/test/claude-json-test.ts @@ -1,9 +1,6 @@ -import { callAi, getMeta } from "../src/index"; -import { Message } from "../src/types"; -import dotenv from "dotenv"; - -// 
Load environment variables from .env file if present -dotenv.config(); +import { callAi, getMeta, callAiEnv } from "call-ai"; +import { expectOrWarn } from "./test-helper.js"; +import { describe, it } from "vitest"; // Configure retry settings for flaky tests - use fewer retries with faster failures // jest.retryTimes(2, { logErrorsBeforeRetry: true }); @@ -12,8 +9,7 @@ dotenv.config(); // jest.setTimeout(60000); // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; -const itif = (condition: boolean) => (condition ? it.concurrent : it.skip); +const haveApiKey = callAiEnv.CALLAI_API_KEY; // Timeout for individual test const TIMEOUT = 30000; @@ -33,32 +29,6 @@ const supportedModels = { // Define the model names as an array for looping const modelEntries = Object.entries(supportedModels); -// Function to handle test expectations based on model grade -const expectOrWarn = ( - model: { id: string; grade: string }, - condition: boolean, - message: string, - debugValue?: any, // Added optional debug value parameter -) => { - if (model.grade === "A") { - if (!condition) { - // Enhanced debug logging for failures - console.log(`DETAILED FAILURE for ${model.id}: ${message}`); - if (debugValue !== undefined) { - console.log( - "Debug value:", - typeof debugValue === "object" - ? JSON.stringify(debugValue, null, 2) - : debugValue, - ); - } - } - expect(condition).toBe(true); - } else if (!condition) { - console.warn(`Warning (${model.id}): ${message}`); - } -}; - // Create a test function that won't fail on timeouts for B and C grade models const gradeAwareTest = (modelId: { id: string; grade: string }) => { if (!haveApiKey) return it.skip; @@ -77,20 +47,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => { fn(), new Promise((resolve) => setTimeout(() => { - console.warn( - `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`, - ); + console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`); resolve(undefined); }, timeout || TIMEOUT), ), ]); return result; } catch (error: unknown) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.warn( - `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`, - ); + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`); // Don't fail the test return; } @@ -116,7 +81,7 @@ describe("Claude JSON property splitting test", () => { const result = await callAi( "Provide information about France. 
Population should be expressed in millions (e.g., 67.5 for 67.5 million people).", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, stream: true, // Streaming must be enabled to trigger the property splitting issue schema: { @@ -134,7 +99,7 @@ describe("Claude JSON property splitting test", () => { ); // Get the metadata - const resultMeta = getMeta(result); + // const resultMeta = getMeta(result); // Verify response expectOrWarn( @@ -149,12 +114,8 @@ describe("Claude JSON property splitting test", () => { if (typeof result === "object" && Symbol.asyncIterator in result) { // Handle streaming response - console.log( - `\n===== Starting streaming test with ${modelName} =====`, - ); - console.log( - `This test will pass if property names split across chunks are handled correctly`, - ); + console.log(`\n===== Starting streaming test with ${modelName} =====`); + console.log(`This test will pass if property names split across chunks are handled correctly`); try { // Collect all chunks @@ -165,27 +126,18 @@ describe("Claude JSON property splitting test", () => { finalResult = chunk; } - console.log( - `\n===== Received ${chunkCount} chunks from ${modelName} =====`, - ); - console.log( - finalResult.substring(0, 500) + - (finalResult.length > 500 ? "..." : ""), - ); + console.log(`\n===== Received ${chunkCount} chunks from ${modelName} =====`); + console.log(finalResult.substring(0, 500) + (finalResult.length > 500 ? "..." : "")); // This is the key part - parse the final JSON result // Without the fix, this might fail when property names are split across chunks const data = JSON.parse(finalResult); // Log parsed data for debugging - console.log( - `\n===== Successfully parsed data from ${modelName} =====`, - ); + console.log(`\n===== Successfully parsed data from ${modelName} =====`); console.log(JSON.stringify(data, null, 2)); - console.log( - `\n===== TEST PASSED: JSON property splitting handled correctly =====`, - ); + console.log(`\n===== TEST PASSED: JSON property splitting handled correctly =====`); // Check timing info const meta = getMeta(generator); @@ -203,9 +155,7 @@ describe("Claude JSON property splitting test", () => { meta.timing, ); } else { - console.warn( - `No timing information available for ${modelName} model`, - ); + console.warn(`No timing information available for ${modelName} model`); } expectOrWarn( @@ -217,12 +167,7 @@ describe("Claude JSON property splitting test", () => { if (typeof data === "object" && data !== null) { // Check required fields - expectOrWarn( - modelId, - "capital" in data, - `Missing 'capital' in ${modelName} model response`, - Object.keys(data), - ); + expectOrWarn(modelId, "capital" in data, `Missing 'capital' in ${modelName} model response`, Object.keys(data)); expectOrWarn( modelId, "population" in data, @@ -259,16 +204,12 @@ describe("Claude JSON property splitting test", () => { if (typeof data.population === "number") { // Population should be in a reasonable range (60-70 million for France) // Check if number is already in millions (under 100) or in absolute (over 1 million) - const populationInMillions = - data.population < 1000 - ? data.population - : data.population / 1000000; + const populationInMillions = data.population < 1000 ? 
data.population : data.population / 1000000; // This is a critical check for our property splitting test // If "population" was split (e.g., "popul" + "ation"), parsing would fail without our fix expectOrWarn( modelId, - populationInMillions >= 60 && - populationInMillions <= 70, + populationInMillions >= 60 && populationInMillions <= 70, `Population ${data.population} (${populationInMillions.toFixed(2)}M) outside expected range in ${modelName} model response - possibly due to property name splitting`, data.population, ); @@ -287,11 +228,7 @@ // Should include French expectOrWarn( modelId, - data.languages.some( - (lang: string) => - typeof lang === "string" && - lang.toLowerCase().includes("french"), - ), + data.languages.some((lang: string) => typeof lang === "string" && lang.toLowerCase().includes("french")), `Languages doesn't include French in ${modelName} model response`, data.languages, ); } } } catch (e) { // This will be hit if JSON parsing fails, which is the issue we're testing for - console.log( - `\n===== TEST FAILED: JSON parsing error in ${modelName} response =====`, - ); - console.log( - `This indicates the streaming property splitting issue is present!`, - ); + console.log(`\n===== TEST FAILED: JSON parsing error in ${modelName} response =====`); + console.log(`This indicates the streaming property splitting issue is present!`); console.log(`Error: ${e}`); console.log(`JSON that failed to parse: ${finalResult}`); diff --git a/test/claude-tool-script.ts b/test/claude-tool-script.ts new file mode 100644 index 0000000..e859b5c --- /dev/null +++ b/test/claude-tool-script.ts @@ -0,0 +1,100 @@ +// Basic test for Claude tool mode + +import { callAi, callAiEnv } from "call-ai"; +import * as process from "node:process"; + +// Helper function with timeout +async function callWithTimeout<T>(promiseFn: () => Promise<T>, timeout = 30000): Promise<T> { + return new Promise<T>((resolve, reject) => { + // Create a timeout that will reject the promise + const timeoutId = setTimeout(() => { + reject(new Error(`Operation timed out after ${timeout}ms`)); + }, timeout); + + promiseFn() + .then((result) => { + clearTimeout(timeoutId); + resolve(result); + }) + .catch((error) => { + clearTimeout(timeoutId); + reject(error); + }); + }); +} + +async function main() { + // Get API key from environment, trying both variables + const apiKey = callAiEnv.CALLAI_API_KEY; + + if (!apiKey) { + console.error("Error: No API key found. 
Please set CALLAI_API_KEY or OPENROUTER_API_KEY in your .env file."); + process.exit(1); + } + + // Define a simple todo list schema + const todoSchema = { + name: "todo_list", + properties: { + todos: { + type: "array", + items: { type: "string" }, + }, + }, + }; + + console.log("Testing Claude with tool mode (automatic):"); + + try { + // Test Claude with tool mode (automatic) + const claudeResult = await callWithTimeout(async () => { + return callAi("Create a todo list for a productive day", { + apiKey: apiKey, + model: "anthropic/claude-3-sonnet", + schema: todoSchema, + }); + }); + + console.log("Claude tool mode result:", claudeResult); + + if (typeof claudeResult === "string") { + try { + const parsedJson = JSON.parse(claudeResult); + console.log("Parsed JSON:", parsedJson); + } catch (e) { + console.log("Failed to parse JSON:", (e as Error).message); + } + } else { + console.log("Result is not a string:", typeof claudeResult); + } + + // Compare with OpenAI + console.log("-".repeat(80)); + console.log("Testing OpenAI with json_schema:"); + + const openaiResult = await callWithTimeout(async () => { + return callAi("Create a todo list for a productive day", { + apiKey: apiKey, + model: "openai/gpt-4o-mini", + schema: todoSchema, + }); + }); + + console.log("OpenAI json_schema result:", openaiResult); + + if (typeof openaiResult === "string") { + try { + const parsedJson = JSON.parse(openaiResult); + console.log("Parsed JSON:", parsedJson); + } catch (e) { + console.log("Failed to parse JSON:", (e as Error).message); + } + } else { + console.log("Result is not a string:", typeof openaiResult); + } + } catch (error) { + console.error("Error:", error); + } +} + +main(); diff --git a/test/claude-tool-test.js b/test/claude-tool-test.js deleted file mode 100644 index 2b76478..0000000 --- a/test/claude-tool-test.js +++ /dev/null @@ -1,106 +0,0 @@ -// Basic test for Claude tool mode - -const { callAi } = require('../dist/index.js'); -require('dotenv').config(); - -// Helper function with timeout -const callWithTimeout = async (promiseFn, timeout = 30000) => { - return new Promise(async (resolve, reject) => { - // Create a timeout that will reject the promise - const timeoutId = setTimeout(() => { - reject(new Error(`Operation timed out after ${timeout}ms`)); - }, timeout); - - try { - // Try to execute the function - const result = await promiseFn(); - clearTimeout(timeoutId); - resolve(result); - } catch (error) { - clearTimeout(timeoutId); - reject(error); - } - }); -}; - -async function main() { - // Get API key from environment, trying both variables - const apiKey = process.env.CALLAI_API_KEY || process.env.OPENROUTER_API_KEY; - - if (!apiKey) { - console.error('Error: No API key found. 
Please set CALLAI_API_KEY or OPENROUTER_API_KEY in your .env file.'); - process.exit(1); - } - - // Define a simple todo list schema - const todoSchema = { - name: 'todo_list', - properties: { - todos: { - type: 'array', - items: { type: 'string' } - } - } - }; - - console.log('Testing Claude with tool mode (automatic):'); - - try { - // Test Claude with tool mode (automatic) - const claudeResult = await callWithTimeout(async () => { - return callAi( - 'Create a todo list for a productive day', - { - apiKey: apiKey, - model: 'anthropic/claude-3-sonnet', - schema: todoSchema - } - ); - }); - - console.log('Claude tool mode result:', claudeResult); - - if (typeof claudeResult === 'string') { - try { - const parsedJson = JSON.parse(claudeResult); - console.log('Parsed JSON:', parsedJson); - } catch (e) { - console.log('Failed to parse JSON:', e.message); - } - } else { - console.log('Result is not a string:', typeof claudeResult); - } - - // Compare with OpenAI - console.log('-'.repeat(80)); - console.log('Testing OpenAI with json_schema:'); - - const openaiResult = await callWithTimeout(async () => { - return callAi( - 'Create a todo list for a productive day', - { - apiKey: apiKey, - model: 'openai/gpt-4o-mini', - schema: todoSchema - } - ); - }); - - console.log('OpenAI json_schema result:', openaiResult); - - if (typeof openaiResult === 'string') { - try { - const parsedJson = JSON.parse(openaiResult); - console.log('Parsed JSON:', parsedJson); - } catch (e) { - console.log('Failed to parse JSON:', e.message); - } - } else { - console.log('Result is not a string:', typeof openaiResult); - } - } catch (error) { - console.error('Error:', error); - } -} - -main(); \ No newline at end of file diff --git a/test/fix-callai-claude.md b/test/fix-callai-claude.md index 459bc55..ab4ddb8 100644 --- a/test/fix-callai-claude.md +++ b/test/fix-callai-claude.md @@ -9,6 +9,7 @@ We've identified a critical issue with the Claude API when using tool mode: 3. This is causing the tests to time out when using Claude with tool mode Key observations: + - System message approach works fine with Claude (completes in 2-6 seconds) - Tool mode approach times out when trying to read the response body - The issue appears to be in the response format (many empty newlines at the beginning of the response) @@ -33,14 +34,14 @@ const readResponseTextWithTimeout = async (response: Response, timeoutMs: number reject(new Error(`Response.text() timed out after ${timeoutMs}ms`)); }, timeoutMs); }); - + try { // Race the response.text() promise against the timeout const text = await Promise.race([response.text(), timeoutPromise]); return text; } catch (error) { // If timeout occurs, throw a more descriptive error - if (error instanceof Error && error.message.includes('timed out')) { + if (error instanceof Error && error.message.includes("timed out")) { throw new Error(`Timeout reading response from API. 
This is a known issue with Claude API when using tool mode.`); } throw error; @@ -54,7 +55,7 @@ const readResponseTextWithTimeout = async (response: Response, timeoutMs: number let responseText; try { // Use the timeout function for Claude API with tool mode - if (model.includes('claude') && tools) { + if (model.includes("claude") && tools) { responseText = await readResponseTextWithTimeout(response); } else { // For other models, use the standard response.text() @@ -62,7 +63,7 @@ try { } } catch (error) { // Handle the error, possibly fallback to system message approach - console.error('Error reading response:', error.message); + console.error("Error reading response:", error.message); throw error; } ``` @@ -70,10 +71,11 @@ try { ## Alternative Solutions 1. **Workaround**: Use system message approach instead of tool mode for Claude models + ```typescript // Detect Claude model and force system message approach even if tool mode was requested - if (model.includes('claude') && tools) { - console.warn('Tool mode with Claude may cause timeouts, using system message approach instead'); + if (model.includes("claude") && tools) { + console.warn("Tool mode with Claude may cause timeouts, using system message approach instead"); // Transform request to use system message approach // ... } @@ -88,8 +90,8 @@ try { return result; } catch (error) { // If timeout error, fallback to system message approach - if (error.message.includes('timeout')) { - console.warn('Tool mode timed out, falling back to system message approach'); + if (error.message.includes("timeout")) { + console.warn("Tool mode timed out, falling back to system message approach"); const fallbackResult = await callWithSystemMessage(); return fallbackResult; } @@ -101,4 +103,4 @@ try { We recommend implementing the timeout protection for `response.text()` as the primary fix, as it directly addresses the root cause without changing the API behavior. The timeout will prevent indefinite hanging and provide a clear error message. -Additionally, adding a warning in the documentation about potential timeouts when using Claude with tool mode would be helpful for users. \ No newline at end of file +Additionally, adding a warning in the documentation about potential timeouts when using Claude with tool mode would be helpful for users. diff --git a/test/fixtures/claude-request.json b/test/fixtures/claude-request.json index d1a9947..a852964 100644 --- a/test/fixtures/claude-request.json +++ b/test/fixtures/claude-request.json @@ -24,4 +24,4 @@ } } } -} \ No newline at end of file +} diff --git a/test/fixtures/claude-response.json b/test/fixtures/claude-response.json index 86df225..798eb2e 100644 --- a/test/fixtures/claude-response.json +++ b/test/fixtures/claude-response.json @@ -1,21 +1,25 @@ - - - - - - - - - - - - - - - - - - - - -{"id":"gen-1742583610-KEyT7cRGDRzBRAMcJBqn","provider":"Anthropic","model":"anthropic/claude-3-sonnet","object":"chat.completion","created":1742583610,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"Sure, here's a short book recommendation in the requested format:\n\nTitle: The Alchemist\nAuthor: Paulo Coelho\nGenre: Fiction, Allegorical novel\nDescription: \"The Alchemist\" by Paulo Coelho is a beautiful and inspiring story about following your dreams and finding your true destiny. It follows the journey of a young shepherd named Santiago who travels from Spain to the Egyptian desert in pursuit of a treasure buried near the Pyramids. 
Along the way, he encounters various challenges and people who teach him valuable lessons about life, love, and the importance of listening to your heart. With its simple yet profound wisdom and captivating storytelling, this book will leave you feeling inspired and empowered to pursue your own dreams.","refusal":null}}],"usage":{"prompt_tokens":18,"completion_tokens":162,"total_tokens":180}} \ No newline at end of file +{ + "id": "gen-1742583610-KEyT7cRGDRzBRAMcJBqn", + "provider": "Anthropic", + "model": "anthropic/claude-3-sonnet", + "object": "chat.completion", + "created": 1742583610, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "stop", + "index": 0, + "message": { + "role": "assistant", + "content": "Sure, here's a short book recommendation in the requested format:\n\nTitle: The Alchemist\nAuthor: Paulo Coelho\nGenre: Fiction, Allegorical novel\nDescription: \"The Alchemist\" by Paulo Coelho is a beautiful and inspiring story about following your dreams and finding your true destiny. It follows the journey of a young shepherd named Santiago who travels from Spain to the Egyptian desert in pursuit of a treasure buried near the Pyramids. Along the way, he encounters various challenges and people who teach him valuable lessons about life, love, and the importance of listening to your heart. With its simple yet profound wisdom and captivating storytelling, this book will leave you feeling inspired and empowered to pursue your own dreams.", + "refusal": null + } + } + ], + "usage": { + "prompt_tokens": 18, + "completion_tokens": 162, + "total_tokens": 180 + } +} diff --git a/test/fixtures/claude-simple-request.json b/test/fixtures/claude-simple-request.json index 3a9748e..9f72d4c 100644 --- a/test/fixtures/claude-simple-request.json +++ b/test/fixtures/claude-simple-request.json @@ -6,4 +6,4 @@ "content": "Create a todo list for a productive day" } ] -} \ No newline at end of file +} diff --git a/test/fixtures/claude-simple-response.json b/test/fixtures/claude-simple-response.json index 65e0610..c87b1a8 100644 --- a/test/fixtures/claude-simple-response.json +++ b/test/fixtures/claude-simple-response.json @@ -1,29 +1,25 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - -{"id":"gen-1742665954-8GVRClXt0hTjiQfS7F9x","provider":"Anthropic","model":"anthropic/claude-3-sonnet","object":"chat.completion","created":1742665954,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"Here's a sample todo list for a productive day:\n\n1. Wake up early (around 6-7 AM) and start your day with a positive mindset.\n2. Drink a glass of water and have a nutritious breakfast.\n3. Review your goals and priorities for the day.\n4. Tackle your most challenging or important task first when your energy levels are high (e.g., work on a crucial project, study for an exam, or complete a critical report).\n5. Take a short break (10-15 minutes) to recharge and stretch.\n6. Work on the next important task on your list.\n7. Have a healthy lunch and take a short walk to boost your energy levels.\n8. Continue working on your tasks, prioritizing the most important ones.\n9. Schedule a dedicated time for exercise or physical activity to stay active and energized.\n10. Review your progress and celebrate your accomplishments for the day.\n11. Plan for the next day and ensure you have everything you need (e.g., materials, resources, appointments).\n12. 
Wind down with a relaxing activity, such as reading, listening to music, or practicing mindfulness.\n13. Get enough sleep (7-9 hours) to recharge for the next productive day.\n\nRemember, this is a general template, and you can customize it according to your specific goals, priorities, and preferences. Additionally, it's essential to be flexible and adjust your plan as needed based on unexpected circumstances or changes in your schedule.","refusal":null}}],"usage":{"prompt_tokens":15,"completion_tokens":331,"total_tokens":346}} \ No newline at end of file +{ + "id": "gen-1742665954-8GVRClXt0hTjiQfS7F9x", + "provider": "Anthropic", + "model": "anthropic/claude-3-sonnet", + "object": "chat.completion", + "created": 1742665954, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "stop", + "index": 0, + "message": { + "role": "assistant", + "content": "Here's a sample todo list for a productive day:\n\n1. Wake up early (around 6-7 AM) and start your day with a positive mindset.\n2. Drink a glass of water and have a nutritious breakfast.\n3. Review your goals and priorities for the day.\n4. Tackle your most challenging or important task first when your energy levels are high (e.g., work on a crucial project, study for an exam, or complete a critical report).\n5. Take a short break (10-15 minutes) to recharge and stretch.\n6. Work on the next important task on your list.\n7. Have a healthy lunch and take a short walk to boost your energy levels.\n8. Continue working on your tasks, prioritizing the most important ones.\n9. Schedule a dedicated time for exercise or physical activity to stay active and energized.\n10. Review your progress and celebrate your accomplishments for the day.\n11. Plan for the next day and ensure you have everything you need (e.g., materials, resources, appointments).\n12. Wind down with a relaxing activity, such as reading, listening to music, or practicing mindfulness.\n13. Get enough sleep (7-9 hours) to recharge for the next productive day.\n\nRemember, this is a general template, and you can customize it according to your specific goals, priorities, and preferences. Additionally, it's essential to be flexible and adjust your plan as needed based on unexpected circumstances or changes in your schedule.", + "refusal": null + } + } + ], + "usage": { + "prompt_tokens": 15, + "completion_tokens": 331, + "total_tokens": 346 + } +} diff --git a/test/fixtures/claude-system-request.json b/test/fixtures/claude-system-request.json index ac9b454..215d05c 100644 --- a/test/fixtures/claude-system-request.json +++ b/test/fixtures/claude-system-request.json @@ -10,4 +10,4 @@ "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema." 
} ] -} \ No newline at end of file +} diff --git a/test/fixtures/claude-system-response.json b/test/fixtures/claude-system-response.json index 4f8a300..be9846f 100644 --- a/test/fixtures/claude-system-response.json +++ b/test/fixtures/claude-system-response.json @@ -1,7 +1,21 @@ - - - - - - -{"id":"gen-1742583625-858xN4bifrutSTSwhxdf","provider":"Anthropic","model":"anthropic/claude-3-sonnet","object":"chat.completion","created":1742583625,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"{\n \"title\": \"The Little Prince\",\n \"author\": \"Antoine de Saint-Exupéry\",\n \"year\": 1943,\n \"genre\": \"Novella\",\n \"rating\": 5\n}","refusal":null}}],"usage":{"prompt_tokens":94,"completion_tokens":59,"total_tokens":153}} \ No newline at end of file +{ + "id": "gen-1742583625-858xN4bifrutSTSwhxdf", + "provider": "Anthropic", + "model": "anthropic/claude-3-sonnet", + "object": "chat.completion", + "created": 1742583625, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "stop", + "index": 0, + "message": { + "role": "assistant", + "content": "{\n \"title\": \"The Little Prince\",\n \"author\": \"Antoine de Saint-Exupéry\",\n \"year\": 1943,\n \"genre\": \"Novella\",\n \"rating\": 5\n}", + "refusal": null + } + } + ], + "usage": { "prompt_tokens": 94, "completion_tokens": 59, "total_tokens": 153 } +} diff --git a/test/fixtures/claude-tool-request.json b/test/fixtures/claude-tool-request.json index d63f3aa..5b5c699 100644 --- a/test/fixtures/claude-tool-request.json +++ b/test/fixtures/claude-tool-request.json @@ -29,4 +29,4 @@ "type": "tool", "name": "todo_list" } -} \ No newline at end of file +} diff --git a/test/fixtures/claude-tool-response.json b/test/fixtures/claude-tool-response.json index b61589d..e69de29 100644 --- a/test/fixtures/claude-tool-response.json +++ b/test/fixtures/claude-tool-response.json @@ -1,140 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/test/fixtures/deepseek-request.json b/test/fixtures/deepseek-request.json index acc4dc1..dd3539a 100644 --- a/test/fixtures/deepseek-request.json +++ b/test/fixtures/deepseek-request.json @@ -22,4 +22,4 @@ } } } -} \ No newline at end of file +} diff --git a/test/fixtures/deepseek-response.json b/test/fixtures/deepseek-response.json index a3291b0..65382ef 100644 --- a/test/fixtures/deepseek-response.json +++ b/test/fixtures/deepseek-response.json @@ -1,25 +1,21 @@ - - - - - - - - - - - - - - - - - - - - - - - - -{"id":"gen-1742589686-QFGiw2JsjmyYEqwwnHjW","provider":"Nebius","model":"deepseek/deepseek-chat","object":"chat.completion","created":1742589686,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"**Title:** *The Alchemist* \n**Author:** Paulo Coelho \n**Genre:** Fiction, Inspirational \n**Why Read This?** A timeless tale of self-discovery and following your dreams, *The Alchemist* is a short but profound novel that inspires readers to listen to their hearts and pursue their personal legends. 
Perfect for anyone seeking motivation or a reminder of life's deeper purpose.","refusal":null}}],"usage":{"prompt_tokens":14,"completion_tokens":84,"total_tokens":98}} \ No newline at end of file +{ + "id": "gen-1742589686-QFGiw2JsjmyYEqwwnHjW", + "provider": "Nebius", + "model": "deepseek/deepseek-chat", + "object": "chat.completion", + "created": 1742589686, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "stop", + "index": 0, + "message": { + "role": "assistant", + "content": "**Title:** *The Alchemist* \n**Author:** Paulo Coelho \n**Genre:** Fiction, Inspirational \n**Why Read This?** A timeless tale of self-discovery and following your dreams, *The Alchemist* is a short but profound novel that inspires readers to listen to their hearts and pursue their personal legends. Perfect for anyone seeking motivation or a reminder of life's deeper purpose.", + "refusal": null + } + } + ], + "usage": { "prompt_tokens": 14, "completion_tokens": 84, "total_tokens": 98 } +} diff --git a/test/fixtures/deepseek-system-request.json b/test/fixtures/deepseek-system-request.json index 7bbbb10..8239164 100644 --- a/test/fixtures/deepseek-system-request.json +++ b/test/fixtures/deepseek-system-request.json @@ -1,13 +1,13 @@ { "model": "deepseek/deepseek-chat", "messages": [ - { - "role": "system", - "content": "Please generate structured JSON responses that follow this exact schema:\n{\n \"title\": string,\n \"author\": string,\n \"year\": number,\n \"genre\": string,\n \"rating\": number (between 1-5)\n}\nDo not include any explanation or text outside of the JSON object." + { + "role": "system", + "content": "Please generate structured JSON responses that follow this exact schema:\n{\n \"title\": string,\n \"author\": string,\n \"year\": number,\n \"genre\": string,\n \"rating\": number (between 1-5)\n}\nDo not include any explanation or text outside of the JSON object." }, - { - "role": "user", - "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema." + { + "role": "user", + "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema." } ] -} \ No newline at end of file +} diff --git a/test/fixtures/deepseek-system-response.json b/test/fixtures/deepseek-system-response.json index 930a034..d6af374 100644 --- a/test/fixtures/deepseek-system-response.json +++ b/test/fixtures/deepseek-system-response.json @@ -1,11 +1,22 @@ - - - - - - - - - - -{"id":"gen-1742589615-WBu6qMubsTwF0bM1lIZr","provider":"Novita","model":"deepseek/deepseek-chat","object":"chat.completion","created":1742589615,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"{\"title\": \"The Great Gatsby\", \"author\": \"F. Scott Fitzgerald\", \"year\": 1925, \"genre\": \"Tragedy\", \"rating\": 4.5}","refusal":null}}],"system_fingerprint":"","usage":{"prompt_tokens":81,"completion_tokens":40,"total_tokens":121}} \ No newline at end of file +{ + "id": "gen-1742589615-WBu6qMubsTwF0bM1lIZr", + "provider": "Novita", + "model": "deepseek/deepseek-chat", + "object": "chat.completion", + "created": 1742589615, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "stop", + "index": 0, + "message": { + "role": "assistant", + "content": "{\"title\": \"The Great Gatsby\", \"author\": \"F. 
Scott Fitzgerald\", \"year\": 1925, \"genre\": \"Tragedy\", \"rating\": 4.5}", + "refusal": null + } + } + ], + "system_fingerprint": "", + "usage": { "prompt_tokens": 81, "completion_tokens": 40, "total_tokens": 121 } +} diff --git a/test/fixtures/gemini-request.json b/test/fixtures/gemini-request.json index 204ffe4..8ef9bc7 100644 --- a/test/fixtures/gemini-request.json +++ b/test/fixtures/gemini-request.json @@ -24,4 +24,4 @@ } } } -} \ No newline at end of file +} diff --git a/test/fixtures/gemini-response.json b/test/fixtures/gemini-response.json index 9ac712e..a732955 100644 --- a/test/fixtures/gemini-response.json +++ b/test/fixtures/gemini-response.json @@ -1,3 +1,21 @@ - - -{"id":"gen-1742583638-Bfz1nY5jVS7hdhVNkvLm","provider":"Google","model":"google/gemini-2.0-flash-001","object":"chat.completion","created":1742583638,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"STOP","index":0,"message":{"role":"assistant","content":"{\n \"author\": \"Ursula K. Le Guin\",\n \"genre\": \"Science Fiction\",\n \"rating\": 4.5,\n \"title\": \"The Left Hand of Darkness\",\n \"year\": 1969\n}","refusal":null}}],"usage":{"prompt_tokens":21,"completion_tokens":56,"total_tokens":77}} \ No newline at end of file +{ + "id": "gen-1742583638-Bfz1nY5jVS7hdhVNkvLm", + "provider": "Google", + "model": "google/gemini-2.0-flash-001", + "object": "chat.completion", + "created": 1742583638, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "STOP", + "index": 0, + "message": { + "role": "assistant", + "content": "{\n \"author\": \"Ursula K. Le Guin\",\n \"genre\": \"Science Fiction\",\n \"rating\": 4.5,\n \"title\": \"The Left Hand of Darkness\",\n \"year\": 1969\n}", + "refusal": null + } + } + ], + "usage": { "prompt_tokens": 21, "completion_tokens": 56, "total_tokens": 77 } +} diff --git a/test/fixtures/gemini-system-request.json b/test/fixtures/gemini-system-request.json index be81d4e..83ee148 100644 --- a/test/fixtures/gemini-system-request.json +++ b/test/fixtures/gemini-system-request.json @@ -10,4 +10,4 @@ "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema." 
} ] -} \ No newline at end of file +} diff --git a/test/fixtures/gemini-system-response.json b/test/fixtures/gemini-system-response.json index 507083f..2fd1461 100644 --- a/test/fixtures/gemini-system-response.json +++ b/test/fixtures/gemini-system-response.json @@ -1,5 +1,21 @@ - - - - -{"id":"gen-1742583646-4lgjXIie97z7LYAlFScQ","provider":"Google AI Studio","model":"google/gemini-2.0-flash-001","object":"chat.completion","created":1742583646,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"STOP","index":0,"message":{"role":"assistant","content":"```json\n{\n \"title\": \"The Martian\",\n \"author\": \"Andy Weir\",\n \"year\": 2011,\n \"genre\": \"Science Fiction\",\n \"rating\": 5\n}\n```\n","refusal":null}}],"usage":{"prompt_tokens":86,"completion_tokens":54,"total_tokens":140}} \ No newline at end of file +{ + "id": "gen-1742583646-4lgjXIie97z7LYAlFScQ", + "provider": "Google AI Studio", + "model": "google/gemini-2.0-flash-001", + "object": "chat.completion", + "created": 1742583646, + "choices": [ + { + "logprobs": null, + "finish_reason": "stop", + "native_finish_reason": "STOP", + "index": 0, + "message": { + "role": "assistant", + "content": "```json\n{\n \"title\": \"The Martian\",\n \"author\": \"Andy Weir\",\n \"year\": 2011,\n \"genre\": \"Science Fiction\",\n \"rating\": 5\n}\n```\n", + "refusal": null + } + } + ], + "usage": { "prompt_tokens": 86, "completion_tokens": 54, "total_tokens": 140 } +} diff --git a/test/fixtures/gpt4turbo-response.json b/test/fixtures/gpt4turbo-response.json index ee22457..e9420ce 100644 --- a/test/fixtures/gpt4turbo-response.json +++ b/test/fixtures/gpt4turbo-response.json @@ -1 +1,11 @@ -{"error":{"message":"Provider returned error","code":400,"metadata":{"raw":"{\n \"error\": {\n \"message\": \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\",\n \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\": null\n }\n}","provider_name":"OpenAI"}},"user_id":"user_2tzeHxGce6y8I6Eq9i8uyweh0dO"} \ No newline at end of file +{ + "error": { + "message": "Provider returned error", + "code": 400, + "metadata": { + "raw": "{\n \"error\": {\n \"message\": \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\",\n \"type\": \"invalid_request_error\",\n \"param\": null,\n \"code\": null\n }\n}", + "provider_name": "OpenAI" + } + }, + "user_id": "user_2tzeHxGce6y8I6Eq9i8uyweh0dO" +} diff --git a/test/fixtures/gpt4turbo-system-request.json b/test/fixtures/gpt4turbo-system-request.json index dda2b7c..3a59ccb 100644 --- a/test/fixtures/gpt4turbo-system-request.json +++ b/test/fixtures/gpt4turbo-system-request.json @@ -1,13 +1,13 @@ { "model": "openai/gpt-4-turbo", "messages": [ - { - "role": "system", - "content": "Please generate structured JSON responses that follow this exact schema:\n{\n \"title\": string,\n \"author\": string,\n \"year\": number,\n \"genre\": string,\n \"rating\": number (between 1-5)\n}\nDo not include any explanation or text outside of the JSON object." 
+    {
+      "role": "system",
+      "content": "Please generate structured JSON responses that follow this exact schema:\n{\n \"title\": string,\n \"author\": string,\n \"year\": number,\n \"genre\": string,\n \"rating\": number (between 1-5)\n}\nDo not include any explanation or text outside of the JSON object."
     },
-    {
-      "role": "user",
-      "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema."
+    {
+      "role": "user",
+      "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema."
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/test/fixtures/gpt4turbo-system-response.json b/test/fixtures/gpt4turbo-system-response.json
index 3b0439f..dd9b5f9 100644
--- a/test/fixtures/gpt4turbo-system-response.json
+++ b/test/fixtures/gpt4turbo-system-response.json
@@ -1,9 +1,22 @@
-
-
-
-
-
-
-
-
-{"id":"gen-1742590952-jmdj46CyfBxahHJ7pdnn","provider":"OpenAI","model":"openai/gpt-4-turbo","object":"chat.completion","created":1742590952,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"{\n \"title\": \"The Great Gatsby\",\n \"author\": \"F. Scott Fitzgerald\",\n \"year\": 1925,\n \"genre\": \"Novel\",\n \"rating\": 4.5\n}","refusal":null}}],"system_fingerprint":"fp_101a39fff3","usage":{"prompt_tokens":89,"completion_tokens":48,"total_tokens":137}}
\ No newline at end of file
+{
+  "id": "gen-1742590952-jmdj46CyfBxahHJ7pdnn",
+  "provider": "OpenAI",
+  "model": "openai/gpt-4-turbo",
+  "object": "chat.completion",
+  "created": 1742590952,
+  "choices": [
+    {
+      "logprobs": null,
+      "finish_reason": "stop",
+      "native_finish_reason": "stop",
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "{\n \"title\": \"The Great Gatsby\",\n \"author\": \"F. Scott Fitzgerald\",\n \"year\": 1925,\n \"genre\": \"Novel\",\n \"rating\": 4.5\n}",
+        "refusal": null
+      }
+    }
+  ],
+  "system_fingerprint": "fp_101a39fff3",
+  "usage": { "prompt_tokens": 89, "completion_tokens": 48, "total_tokens": 137 }
+}
diff --git a/test/fixtures/llama3-request.json b/test/fixtures/llama3-request.json
index 8a2b94d..f155dc6 100644
--- a/test/fixtures/llama3-request.json
+++ b/test/fixtures/llama3-request.json
@@ -22,4 +22,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/test/fixtures/llama3-response-real.json b/test/fixtures/llama3-response-real.json
index ee83efa..c6b28ac 100644
--- a/test/fixtures/llama3-response-real.json
+++ b/test/fixtures/llama3-response-real.json
@@ -1,13 +1,21 @@
-
-
-
-
-
-
-
-
-
-
-
-
-{"id":"gen-1742589497-0sifo7hW6DxYLb7PmvyO","provider":"DeepInfra","model":"meta-llama/llama-3.3-70b-instruct","object":"chat.completion","created":1742589497,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"{\"title\": \"The Hitchhiker's Guide to the Galaxy\", \"author\": \"Douglas Adams\", \"description\": \"A comedic science fiction series that follows the misadventures of an unwitting human and his alien friend as they travel through space.\", \"genre\": \"Science Fiction, Comedy\"}\r\n","refusal":null}}],"usage":{"prompt_tokens":21,"completion_tokens":62,"total_tokens":83}}
\ No newline at end of file
+{
+  "id": "gen-1742589497-0sifo7hW6DxYLb7PmvyO",
+  "provider": "DeepInfra",
+  "model": "meta-llama/llama-3.3-70b-instruct",
+  "object": "chat.completion",
+  "created": 1742589497,
+  "choices": [
+    {
+      "logprobs": null,
+      "finish_reason": "stop",
+      "native_finish_reason": "stop",
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "{\"title\": \"The Hitchhiker's Guide to the Galaxy\", \"author\": \"Douglas Adams\", \"description\": \"A comedic science fiction series that follows the misadventures of an unwitting human and his alien friend as they travel through space.\", \"genre\": \"Science Fiction, Comedy\"}\r\n",
+        "refusal": null
+      }
+    }
+  ],
+  "usage": { "prompt_tokens": 21, "completion_tokens": 62, "total_tokens": 83 }
+}
diff --git a/test/fixtures/llama3-response.json b/test/fixtures/llama3-response.json
index 9fb93fe..9bb5b57 100644
--- a/test/fixtures/llama3-response.json
+++ b/test/fixtures/llama3-response.json
@@ -1,19 +1,22 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-{"id":"gen-1742589673-SC2SBXxZOwXggN1FPv1K","provider":"SambaNova","model":"meta-llama/llama-3.3-70b-instruct","object":"chat.completion","created":1742589673,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"**Title:** \"The Hitchhiker's Guide to the Galaxy\" \n**Author:** Douglas Adams \n**Genre:** Science Fiction, Comedy \n**Description:** An comedic adventure through space following the misadventures of an unwitting human and his alien friend after Earth's destruction. \n**Why Read:** Unique blend of humor and science fiction, with witty observations on human society and the universe.","refusal":null}}],"system_fingerprint":"fastcoe","usage":{"prompt_tokens":21,"completion_tokens":79,"total_tokens":100}}
\ No newline at end of file
+{
+  "id": "gen-1742589673-SC2SBXxZOwXggN1FPv1K",
+  "provider": "SambaNova",
+  "model": "meta-llama/llama-3.3-70b-instruct",
+  "object": "chat.completion",
+  "created": 1742589673,
+  "choices": [
+    {
+      "logprobs": null,
+      "finish_reason": "stop",
+      "native_finish_reason": "stop",
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "**Title:** \"The Hitchhiker's Guide to the Galaxy\" \n**Author:** Douglas Adams \n**Genre:** Science Fiction, Comedy \n**Description:** An comedic adventure through space following the misadventures of an unwitting human and his alien friend after Earth's destruction. \n**Why Read:** Unique blend of humor and science fiction, with witty observations on human society and the universe.",
+        "refusal": null
+      }
+    }
+  ],
+  "system_fingerprint": "fastcoe",
+  "usage": { "prompt_tokens": 21, "completion_tokens": 79, "total_tokens": 100 }
+}
diff --git a/test/fixtures/llama3-system-request.json b/test/fixtures/llama3-system-request.json
index ef138c6..854fe81 100644
--- a/test/fixtures/llama3-system-request.json
+++ b/test/fixtures/llama3-system-request.json
@@ -1,13 +1,13 @@
 {
   "model": "meta-llama/llama-3.3-70b-instruct",
   "messages": [
-    {
-      "role": "system",
-      "content": "Please generate structured JSON responses that follow this exact schema:\n{\n \"title\": string,\n \"author\": string,\n \"year\": number,\n \"genre\": string,\n \"rating\": number (between 1-5)\n}\nDo not include any explanation or text outside of the JSON object."
+    {
+      "role": "system",
+      "content": "Please generate structured JSON responses that follow this exact schema:\n{\n \"title\": string,\n \"author\": string,\n \"year\": number,\n \"genre\": string,\n \"rating\": number (between 1-5)\n}\nDo not include any explanation or text outside of the JSON object."
     },
-    {
-      "role": "user",
-      "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema."
+    {
+      "role": "user",
+      "content": "Give me a short book recommendation. Respond with only valid JSON matching the schema."
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/test/fixtures/llama3-system-response.json b/test/fixtures/llama3-system-response.json
index 19be6a5..734f236 100644
--- a/test/fixtures/llama3-system-response.json
+++ b/test/fixtures/llama3-system-response.json
@@ -1,9 +1,21 @@
-
-
-
-
-
-
-
-
-{"id":"gen-1742589606-w4AouBh4Kr0Pj1UkSA9Q","provider":"DeepInfra","model":"meta-llama/llama-3.3-70b-instruct","object":"chat.completion","created":1742589606,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"{\n \"title\": \"The Alchemist\",\n \"author\": \"Paulo Coelho\",\n \"year\": 1988,\n \"genre\": \"Fantasy\",\n \"rating\": 4\n}","refusal":null}}],"usage":{"prompt_tokens":93,"completion_tokens":44,"total_tokens":137}}
\ No newline at end of file
+{
+  "id": "gen-1742589606-w4AouBh4Kr0Pj1UkSA9Q",
+  "provider": "DeepInfra",
+  "model": "meta-llama/llama-3.3-70b-instruct",
+  "object": "chat.completion",
+  "created": 1742589606,
+  "choices": [
+    {
+      "logprobs": null,
+      "finish_reason": "stop",
+      "native_finish_reason": "stop",
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "{\n \"title\": \"The Alchemist\",\n \"author\": \"Paulo Coelho\",\n \"year\": 1988,\n \"genre\": \"Fantasy\",\n \"rating\": 4\n}",
+        "refusal": null
+      }
+    }
+  ],
+  "usage": { "prompt_tokens": 93, "completion_tokens": 44, "total_tokens": 137 }
+}
diff --git a/test/fixtures/openai-request.json b/test/fixtures/openai-request.json
index 4a65949..8a6fe41 100644
--- a/test/fixtures/openai-request.json
+++ b/test/fixtures/openai-request.json
@@ -24,4 +24,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/test/fixtures/openai-response.json b/test/fixtures/openai-response.json
index ad269fd..ecdf906 100644
--- a/test/fixtures/openai-response.json
+++ b/test/fixtures/openai-response.json
@@ -1,5 +1,22 @@
-
-
-
-
-{"id":"gen-1742583569-OwvmbKxGTtpJsjzB2D5J","provider":"OpenAI","model":"openai/gpt-4o","object":"chat.completion","created":1742583569,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"{\"title\":\"Where the Crawdads Sing\",\"author\":\"Delia Owens\",\"year\":2018,\"genre\":\"Mystery, Coming-of-age\",\"rating\":4.8}","refusal":null}}],"system_fingerprint":"fp_90d33c15d4","usage":{"prompt_tokens":80,"completion_tokens":37,"total_tokens":117}}
\ No newline at end of file
+{
+  "id": "gen-1742583569-OwvmbKxGTtpJsjzB2D5J",
+  "provider": "OpenAI",
+  "model": "openai/gpt-4o",
+  "object": "chat.completion",
+  "created": 1742583569,
+  "choices": [
+    {
+      "logprobs": null,
+      "finish_reason": "stop",
+      "native_finish_reason": "stop",
+      "index": 0,
+      "message": {
+        "role": "assistant",
+        "content": "{\"title\":\"Where the Crawdads Sing\",\"author\":\"Delia Owens\",\"year\":2018,\"genre\":\"Mystery, Coming-of-age\",\"rating\":4.8}",
+        "refusal": null
+      }
+    }
+  ],
+  "system_fingerprint": "fp_90d33c15d4",
+  "usage": { "prompt_tokens": 80, "completion_tokens": 37, "total_tokens": 117 }
+}
diff --git a/test/fixtures/openai-stream-request.json b/test/fixtures/openai-stream-request.json
index 4375785..f860842 100644
--- a/test/fixtures/openai-stream-request.json
+++ b/test/fixtures/openai-stream-request.json
@@ -25,4 +25,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/test/fixtures/openai-weather-request.json b/test/fixtures/openai-weather-request.json
index 34ebc40..264c65e 100644
--- a/test/fixtures/openai-weather-request.json
+++ b/test/fixtures/openai-weather-request.json
@@ -31,4 +31,4 @@
       }
     }
   }
-}
\ No newline at end of file
+}
diff --git a/test/simple.integration.basic-schema.test.ts b/test/integration/simple.integration.basic-schema.test.ts
similarity index 73%
rename from test/simple.integration.basic-schema.test.ts
rename to test/integration/simple.integration.basic-schema.test.ts
index 592c2e8..eb6e369 100644
--- a/test/simple.integration.basic-schema.test.ts
+++ b/test/integration/simple.integration.basic-schema.test.ts
@@ -1,9 +1,8 @@
-import { callAi, getMeta } from "../src/index";
-// import { Message } from "../src/types";
-import dotenv from "dotenv";
+import { callAi, getMeta, callAiEnv } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it } from "vitest";
 
-// Load environment variables from .env file if present
-dotenv.config();
+// import { Message } from "../src/types";
 
 // Configure retry settings for flaky tests - use fewer retries with faster failures
 // jest.retryTimes(2, { logErrorsBeforeRetry: true });
@@ -12,7 +11,7 @@ dotenv.config();
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.CALLAI_API_KEY;
+const haveApiKey = callAiEnv.CALLAI_API_KEY;
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -33,32 +32,6 @@ const supportedModels = {
 // Define the model names as an array for looping
 const modelEntries = Object.entries(supportedModels);
 
-// Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-  debugValue?: any, // Added optional debug value parameter
-) => {
-  if (model.grade === "A") {
-    if (!condition) {
-      // Enhanced debug logging for failures
-      console.log(`DETAILED FAILURE for ${model.id}: ${message}`);
-      if (debugValue !== undefined) {
-        console.log(
-          "Debug value:",
-          typeof debugValue === "object"
-            ? JSON.stringify(debugValue, null, 2)
-            : debugValue,
-        );
-      }
-    }
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
-
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
   if (!haveApiKey) return it.skip;
@@ -77,20 +50,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
       return result;
     } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
      return;
    }
@@ -114,7 +82,7 @@ describe("Simple callAi integration tests", () => {
       const result = await callAi(
         "Provide information about France. Population should be expressed in millions (e.g., 67.5 for 67.5 million people).",
         {
-          apiKey: process.env.CALLAI_API_KEY,
+          apiKey: callAiEnv.CALLAI_API_KEY,
           model: modelId.id,
           schema: {
             type: "object",
             properties: {
@@ -134,20 +102,14 @@ describe("Simple callAi integration tests", () => {
       // const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but a ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but a ${typeof result} in ${modelName} model`);
 
       if (typeof result === "string") {
         // Try to parse as JSON
         try {
           // Log the entire response for debugging
           console.log(`\n===== Response from ${modelName} =====`);
-          console.log(
-            result.substring(0, 500) + (result.length > 500 ? "..." : ""),
-          );
+          console.log(result.substring(0, 500) + (result.length > 500 ? "..." : ""));
 
           const data = JSON.parse(result);
@@ -158,9 +120,7 @@ describe("Simple callAi integration tests", () => {
           // Verify actual API call timing
           const meta = getMeta(result);
           console.log(`\n===== Timing for ${modelName} =====`);
-          console.log(
-            JSON.stringify(meta?.timing || "No timing data", null, 2),
-          );
+          console.log(JSON.stringify(meta?.timing || "No timing data", null, 2));
 
           // Ensure the call took at least 5ms (to detect mocks or cached responses)
           if (meta?.timing?.duration !== undefined) {
@@ -171,9 +131,7 @@ describe("Simple callAi integration tests", () => {
               meta.timing,
             );
           } else {
-            console.warn(
-              `No timing information available for ${modelName} model`,
-            );
+            console.warn(`No timing information available for ${modelName} model`);
           }
 
           expectOrWarn(
@@ -185,12 +143,7 @@ describe("Simple callAi integration tests", () => {
 
           if (typeof data === "object" && data !== null) {
             // Check required fields
-            expectOrWarn(
-              modelId,
-              "capital" in data,
-              `Missing 'capital' in ${modelName} model response`,
-              Object.keys(data),
-            );
+            expectOrWarn(modelId, "capital" in data, `Missing 'capital' in ${modelName} model response`, Object.keys(data));
             expectOrWarn(
               modelId,
               "population" in data,
@@ -227,10 +180,7 @@ describe("Simple callAi integration tests", () => {
           if (typeof data.population === "number") {
             // Population should be in a reasonable range (60-70 million for France)
             // Check if number is already in millions (under 100) or in absolute (over 1 million)
-            const populationInMillions =
-              data.population < 1000
-                ? data.population
-                : data.population / 1000000;
+            const populationInMillions = data.population < 1000 ? data.population : data.population / 1000000;
             expectOrWarn(
               modelId,
               populationInMillions >= 60 && populationInMillions <= 70,
@@ -252,11 +202,7 @@ describe("Simple callAi integration tests", () => {
             // Should include French
             expectOrWarn(
               modelId,
-              data.languages.some(
-                (lang: string) =>
-                  typeof lang === "string" &&
-                  lang.toLowerCase().includes("french"),
-              ),
+              data.languages.some((lang: string) => typeof lang === "string" && lang.toLowerCase().includes("french")),
               `Languages doesn't include French in ${modelName} model response`,
               data.languages,
             );
@@ -264,11 +210,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
       }
     }
   },
diff --git a/test/simple.integration.key-refresh.test.ts b/test/integration/simple.integration.claude-json.test.ts
similarity index 68%
rename from test/simple.integration.key-refresh.test.ts
rename to test/integration/simple.integration.claude-json.test.ts
index efa268c..e65cdd8 100644
--- a/test/simple.integration.key-refresh.test.ts
+++ b/test/integration/simple.integration.claude-json.test.ts
@@ -1,9 +1,6 @@
-import { callAi, getMeta } from "../src/index";
-// import { Message } from "../src/types";
-import dotenv from "dotenv";
-
-// Load environment variables from .env file if present
-dotenv.config();
+import { callAi, getMeta, callAiEnv } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it } from "vitest";
 
 // Configure retry settings for flaky tests - use fewer retries with faster failures
 // jest.retryTimes(2, { logErrorsBeforeRetry: true });
@@ -12,23 +9,7 @@ dotenv.config();
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.LOW_BALANCE_OPENROUTER_API_KEY;
-
-// Set up environment variables for testing key refresh behavior
-if (process.env.LOW_BALANCE_OPENROUTER_API_KEY) {
-  // Use the low balance key for triggering a refresh scenario
-  process.env.CALLAI_API_KEY = process.env.LOW_BALANCE_OPENROUTER_API_KEY;
-
-  // Set the refresh endpoint to vibecode.garden if not already set
-  if (!process.env.CALLAI_REFRESH_ENDPOINT) {
-    process.env.CALLAI_REFRESH_ENDPOINT = "https://vibecode.garden";
-  }
-
-  // Set the refresh token for authentication
-  if (!process.env.CALL_AI_REFRESH_TOKEN) {
-    process.env.CALL_AI_REFRESH_TOKEN = "use-vibes";
-  }
-}
+const haveApiKey = callAiEnv.CALLAI_API_KEY;
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -49,32 +30,6 @@ const supportedModels = {
 // Define the model names as an array for looping
 const modelEntries = Object.entries(supportedModels);
 
-// Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-  debugValue?: any, // Added optional debug value parameter
-) => {
-  if (model.grade === "A") {
-    if (!condition) {
-      // Enhanced debug logging for failures
-      console.log(`DETAILED FAILURE for ${model.id}: ${message}`);
-      if (debugValue !== undefined) {
-        console.log(
-          "Debug value:",
-          typeof debugValue === "object"
-            ? JSON.stringify(debugValue, null, 2)
-            : debugValue,
-        );
-      }
-    }
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
-
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
   if (!haveApiKey) return it.skip;
@@ -93,20 +48,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
       return result;
     } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
       return;
     }
@@ -130,10 +80,8 @@ describe("Simple callAi integration tests", () => {
       const result = await callAi(
         "Provide information about France. Population should be expressed in millions (e.g., 67.5 for 67.5 million people).",
         {
-          apiKey: process.env.CALLAI_API_KEY,
+          apiKey: callAiEnv.CALLAI_API_KEY,
           model: modelId.id,
-          debug: true,
-          max_tokens: 128000 - 200,
           schema: {
             type: "object",
            properties: {
@@ -152,20 +100,14 @@ describe("Simple callAi integration tests", () => {
       // const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but a ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but a ${typeof result} in ${modelName} model`);
 
       if (typeof result === "string") {
         // Try to parse as JSON
         try {
           // Log the entire response for debugging
           console.log(`\n===== Response from ${modelName} =====`);
-          console.log(
-            result.substring(0, 500) + (result.length > 500 ? "..." : ""),
-          );
+          console.log(result.substring(0, 500) + (result.length > 500 ? "..." : ""));
 
           const data = JSON.parse(result);
@@ -176,9 +118,7 @@ describe("Simple callAi integration tests", () => {
           // Verify actual API call timing
           const meta = getMeta(result);
           console.log(`\n===== Timing for ${modelName} =====`);
-          console.log(
-            JSON.stringify(meta?.timing || "No timing data", null, 2),
-          );
+          console.log(JSON.stringify(meta?.timing || "No timing data", null, 2));
 
           // Ensure the call took at least 5ms (to detect mocks or cached responses)
           if (meta?.timing?.duration !== undefined) {
@@ -189,9 +129,7 @@ describe("Simple callAi integration tests", () => {
               meta.timing,
             );
           } else {
-            console.warn(
-              `No timing information available for ${modelName} model`,
-            );
+            console.warn(`No timing information available for ${modelName} model`);
           }
 
           expectOrWarn(
@@ -203,12 +141,7 @@ describe("Simple callAi integration tests", () => {
 
           if (typeof data === "object" && data !== null) {
             // Check required fields
-            expectOrWarn(
-              modelId,
-              "capital" in data,
-              `Missing 'capital' in ${modelName} model response`,
-              Object.keys(data),
-            );
+            expectOrWarn(modelId, "capital" in data, `Missing 'capital' in ${modelName} model response`, Object.keys(data));
             expectOrWarn(
               modelId,
               "population" in data,
@@ -245,10 +178,7 @@ describe("Simple callAi integration tests", () => {
           if (typeof data.population === "number") {
             // Population should be in a reasonable range (60-70 million for France)
             // Check if number is already in millions (under 100) or in absolute (over 1 million)
-            const populationInMillions =
-              data.population < 1000
-                ? data.population
-                : data.population / 1000000;
+            const populationInMillions = data.population < 1000 ? data.population : data.population / 1000000;
             expectOrWarn(
               modelId,
               populationInMillions >= 60 && populationInMillions <= 70,
@@ -270,11 +200,7 @@ describe("Simple callAi integration tests", () => {
             // Should include French
             expectOrWarn(
               modelId,
-              data.languages.some(
-                (lang: string) =>
-                  typeof lang === "string" &&
-                  lang.toLowerCase().includes("french"),
-              ),
+              data.languages.some((lang: string) => typeof lang === "string" && lang.toLowerCase().includes("french")),
               `Languages doesn't include French in ${modelName} model response`,
               data.languages,
             );
@@ -282,11 +208,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
      }
    }
  },
diff --git a/test/simple.integration.key-refresh-error.test.ts b/test/integration/simple.integration.key-refresh-error.test.ts
similarity index 70%
rename from test/simple.integration.key-refresh-error.test.ts
rename to test/integration/simple.integration.key-refresh-error.test.ts
index 7bcf696..6068551 100644
--- a/test/simple.integration.key-refresh-error.test.ts
+++ b/test/integration/simple.integration.key-refresh-error.test.ts
@@ -1,9 +1,6 @@
-import { callAi, getMeta } from "../src/index";
-// import { Message } from "../src/types";
-import dotenv from "dotenv";
-
-// Load environment variables from .env file if present
-dotenv.config();
+import { callAi, getMeta, callAiEnv } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it, expect } from "vitest";
 
 // Configure retry settings for flaky tests - use fewer retries with faster failures
 // jest.retryTimes(2, { logErrorsBeforeRetry: true });
@@ -12,23 +9,23 @@ dotenv.config();
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.LOW_BALANCE_OPENROUTER_API_KEY;
+// const haveApiKey = callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY;
 
 // Set up environment variables for testing key refresh behavior
-if (process.env.LOW_BALANCE_OPENROUTER_API_KEY) {
-  // Use the low balance key for triggering a refresh scenario
-  process.env.CALLAI_API_KEY = process.env.LOW_BALANCE_OPENROUTER_API_KEY;
-
-  // Set the refresh endpoint to vibecode.garden if not already set
-  if (!process.env.CALLAI_REFRESH_ENDPOINT) {
-    process.env.CALLAI_REFRESH_ENDPOINT = "https://vibecode.garden";
-  }
-
-  // Set the refresh token for authentication
-  if (!process.env.CALL_AI_REFRESH_TOKEN) {
-    process.env.CALL_AI_REFRESH_TOKEN = "use-vibes";
-  }
-}
+// if (callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY) {
+// Use the low balance key for triggering a refresh scenario
+// callAiEnv.CALLAI_API_KEY = callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY;
+
+// Set the refresh endpoint to vibecode.garden if not already set
+// if (!callAiEnv.CALLAI_REFRESH_ENDPOINT) {
+// callAiEnv.CALLAI_REFRESH_ENDPOINT = "https://vibecode.garden";
+// }
+
+// Set the refresh token for authentication
+// if (!callAiEnv.CALL_AI_REFRESH_TOKEN) {
+// callAiEnv.CALL_AI_REFRESH_TOKEN = "use-vibes";
+// }
+// }
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -49,35 +46,9 @@ const supportedModels = {
 // Define the model names as an array for looping
 const modelEntries = Object.entries(supportedModels);
 
-// Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-  debugValue?: any, // Added optional debug value parameter
-) => {
-  if (model.grade === "A") {
-    if (!condition) {
-      // Enhanced debug logging for failures
-      console.log(`DETAILED FAILURE for ${model.id}: ${message}`);
-      if (debugValue !== undefined) {
-        console.log(
-          "Debug value:",
-          typeof debugValue === "object"
-            ? JSON.stringify(debugValue, null, 2)
-            : debugValue,
-        );
-      }
-    }
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
-
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
-  if (!haveApiKey) return it.skip;
+  if (!callAiEnv.CALLAI_API_KEY) return it.skip;
 
   if (modelId.grade === "A") {
     return it.concurrent;
@@ -93,20 +64,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
      return result;
    } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
       return;
     }
@@ -130,7 +96,7 @@ describe("Simple callAi integration tests", () => {
       const result = await callAi(
         "Provide information about France. Population should be expressed in millions (e.g., 67.5 for 67.5 million people).",
         {
-          apiKey: process.env.CALLAI_API_KEY,
+          apiKey: callAiEnv.CALLAI_API_KEY,
           model: modelId.id,
           debug: true,
           refreshToken: "not-a-vibe",
@@ -159,20 +125,14 @@ describe("Simple callAi integration tests", () => {
       // const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but a ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but a ${typeof result} in ${modelName} model`);
 
       if (typeof result === "string") {
         // Try to parse as JSON
         try {
           // Log the entire response for debugging
           console.log(`\n===== Response from ${modelName} =====`);
-          console.log(
-            result.substring(0, 500) + (result.length > 500 ? "..." : ""),
-          );
+          console.log(result.substring(0, 500) + (result.length > 500 ? "..." : ""));
 
           const data = JSON.parse(result);
@@ -183,9 +143,7 @@ describe("Simple callAi integration tests", () => {
           // Verify actual API call timing
           const meta = getMeta(result);
           console.log(`\n===== Timing for ${modelName} =====`);
-          console.log(
-            JSON.stringify(meta?.timing || "No timing data", null, 2),
-          );
+          console.log(JSON.stringify(meta?.timing || "No timing data", null, 2));
 
           // Ensure the call took at least 5ms (to detect mocks or cached responses)
           if (meta?.timing?.duration !== undefined) {
@@ -196,9 +154,7 @@ describe("Simple callAi integration tests", () => {
               meta.timing,
             );
           } else {
-            console.warn(
-              `No timing information available for ${modelName} model`,
-            );
+            console.warn(`No timing information available for ${modelName} model`);
           }
 
           expectOrWarn(
@@ -210,12 +166,7 @@ describe("Simple callAi integration tests", () => {
 
           if (typeof data === "object" && data !== null) {
             // Check required fields
-            expectOrWarn(
-              modelId,
-              "capital" in data,
-              `Missing 'capital' in ${modelName} model response`,
-              Object.keys(data),
-            );
+            expectOrWarn(modelId, "capital" in data, `Missing 'capital' in ${modelName} model response`, Object.keys(data));
             expectOrWarn(
               modelId,
               "population" in data,
@@ -252,10 +203,7 @@ describe("Simple callAi integration tests", () => {
           if (typeof data.population === "number") {
             // Population should be in a reasonable range (60-70 million for France)
             // Check if number is already in millions (under 100) or in absolute (over 1 million)
-            const populationInMillions =
-              data.population < 1000
-                ? data.population
-                : data.population / 1000000;
+            const populationInMillions = data.population < 1000 ? data.population : data.population / 1000000;
             expectOrWarn(
               modelId,
               populationInMillions >= 60 && populationInMillions <= 70,
@@ -277,11 +225,7 @@ describe("Simple callAi integration tests", () => {
             // Should include French
             expectOrWarn(
               modelId,
-              data.languages.some(
-                (lang: string) =>
-                  typeof lang === "string" &&
-                  lang.toLowerCase().includes("french"),
-              ),
+              data.languages.some((lang: string) => typeof lang === "string" && lang.toLowerCase().includes("french")),
               `Languages doesn't include French in ${modelName} model response`,
               data.languages,
             );
@@ -289,11 +233,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
      }
    }
  },
diff --git a/test/simple.integration.claude-json.test.ts b/test/integration/simple.integration.key-refresh.test.ts
similarity index 73%
rename from test/simple.integration.claude-json.test.ts
rename to test/integration/simple.integration.key-refresh.test.ts
index ce9088e..f275074 100644
--- a/test/simple.integration.claude-json.test.ts
+++ b/test/integration/simple.integration.key-refresh.test.ts
@@ -1,9 +1,8 @@
-import { callAi, getMeta } from "../src/index";
-// import { Message } from "../src/types";
-import dotenv from "dotenv";
+import { callAi, getMeta, callAiEnv } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it } from "vitest";
 
-// Load environment variables from .env file if present
-dotenv.config();
+// import { Message } from "../src/types";
 
 // Configure retry settings for flaky tests - use fewer retries with faster failures
 // jest.retryTimes(2, { logErrorsBeforeRetry: true });
@@ -12,7 +11,23 @@ dotenv.config();
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.CALLAI_API_KEY;
+// const haveApiKey = callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY;
+
+// // Set up environment variables for testing key refresh behavior
+// if (callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY) {
+//   // Use the low balance key for triggering a refresh scenario
+//   callAiEnv.CALLAI_API_KEY = callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY;
+
+//   // Set the refresh endpoint to vibecode.garden if not already set
+//   if (!callAiEnv.CALLAI_REFRESH_ENDPOINT) {
+//     callAiEnv.CALLAI_REFRESH_ENDPOINT = "https://vibecode.garden";
+//   }
+
+//   // Set the refresh token for authentication
+//   if (!callAiEnv.CALL_AI_REFRESH_TOKEN) {
+//     callAiEnv.CALL_AI_REFRESH_TOKEN = "use-vibes";
+//   }
+// }
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -33,35 +48,9 @@ const supportedModels = {
 // Define the model names as an array for looping
 const modelEntries = Object.entries(supportedModels);
 
-// Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-  debugValue?: any, // Added optional debug value parameter
-) => {
-  if (model.grade === "A") {
-    if (!condition) {
-      // Enhanced debug logging for failures
-      console.log(`DETAILED FAILURE for ${model.id}: ${message}`);
-      if (debugValue !== undefined) {
-        console.log(
-          "Debug value:",
-          typeof debugValue === "object"
-            ? JSON.stringify(debugValue, null, 2)
-            : debugValue,
-        );
-      }
-    }
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
-
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
-  if (!haveApiKey) return it.skip;
+  if (!callAiEnv.CALLAI_API_KEY) return it.skip;
 
   if (modelId.grade === "A") {
     return it.concurrent;
@@ -77,20 +66,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
       return result;
     } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
       return;
     }
@@ -114,8 +98,10 @@ describe("Simple callAi integration tests", () => {
       const result = await callAi(
         "Provide information about France. Population should be expressed in millions (e.g., 67.5 for 67.5 million people).",
         {
-          apiKey: process.env.CALLAI_API_KEY,
+          apiKey: callAiEnv.CALLAI_API_KEY,
           model: modelId.id,
+          debug: true,
+          max_tokens: 128000 - 200,
           schema: {
             type: "object",
             properties: {
@@ -134,20 +120,14 @@ describe("Simple callAi integration tests", () => {
       // const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but a ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but a ${typeof result} in ${modelName} model`);
 
       if (typeof result === "string") {
         // Try to parse as JSON
         try {
           // Log the entire response for debugging
           console.log(`\n===== Response from ${modelName} =====`);
-          console.log(
-            result.substring(0, 500) + (result.length > 500 ? "..." : ""),
-          );
+          console.log(result.substring(0, 500) + (result.length > 500 ? "..." : ""));
 
           const data = JSON.parse(result);
@@ -158,9 +138,7 @@ describe("Simple callAi integration tests", () => {
           // Verify actual API call timing
           const meta = getMeta(result);
           console.log(`\n===== Timing for ${modelName} =====`);
-          console.log(
-            JSON.stringify(meta?.timing || "No timing data", null, 2),
-          );
+          console.log(JSON.stringify(meta?.timing || "No timing data", null, 2));
 
           // Ensure the call took at least 5ms (to detect mocks or cached responses)
           if (meta?.timing?.duration !== undefined) {
@@ -171,9 +149,7 @@ describe("Simple callAi integration tests", () => {
               meta.timing,
             );
           } else {
-            console.warn(
-              `No timing information available for ${modelName} model`,
-            );
+            console.warn(`No timing information available for ${modelName} model`);
           }
 
           expectOrWarn(
@@ -185,12 +161,7 @@ describe("Simple callAi integration tests", () => {
 
           if (typeof data === "object" && data !== null) {
             // Check required fields
-            expectOrWarn(
-              modelId,
-              "capital" in data,
-              `Missing 'capital' in ${modelName} model response`,
-              Object.keys(data),
-            );
+            expectOrWarn(modelId, "capital" in data, `Missing 'capital' in ${modelName} model response`, Object.keys(data));
             expectOrWarn(
               modelId,
               "population" in data,
@@ -227,10 +198,7 @@ describe("Simple callAi integration tests", () => {
           if (typeof data.population === "number") {
             // Population should be in a reasonable range (60-70 million for France)
             // Check if number is already in millions (under 100) or in absolute (over 1 million)
-            const populationInMillions =
-              data.population < 1000
-                ? data.population
-                : data.population / 1000000;
+            const populationInMillions = data.population < 1000 ? data.population : data.population / 1000000;
             expectOrWarn(
               modelId,
               populationInMillions >= 60 && populationInMillions <= 70,
@@ -252,11 +220,7 @@ describe("Simple callAi integration tests", () => {
             // Should include French
             expectOrWarn(
               modelId,
-              data.languages.some(
-                (lang: string) =>
-                  typeof lang === "string" &&
-                  lang.toLowerCase().includes("french"),
-              ),
+              data.languages.some((lang: string) => typeof lang === "string" && lang.toLowerCase().includes("french")),
               `Languages doesn't include French in ${modelName} model response`,
               data.languages,
             );
@@ -264,11 +228,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
      }
    }
  },
diff --git a/test/simple.integration.low-balance.test.ts b/test/integration/simple.integration.low-balance.test.ts
similarity index 71%
rename from test/simple.integration.low-balance.test.ts
rename to test/integration/simple.integration.low-balance.test.ts
index 11e1eb3..a6ec1c0 100644
--- a/test/simple.integration.low-balance.test.ts
+++ b/test/integration/simple.integration.low-balance.test.ts
@@ -1,16 +1,13 @@
-import { callAi } from "../src/index";
-import dotenv from "dotenv";
-
-// Load environment variables from .env file if present
-dotenv.config();
+import { callAi, callAiEnv } from "call-ai";
+import { describe, it, expect, assert } from "vitest";
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.LOW_BALANCE_OPENROUTER_API_KEY;
+// const haveApiKey = callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY;
 
-// Copy LOW_BALANCE_OPENROUTER_API_KEY to CALLAI_API_KEY for this test
-if (process.env.LOW_BALANCE_OPENROUTER_API_KEY) {
-  process.env.CALLAI_API_KEY = process.env.LOW_BALANCE_OPENROUTER_API_KEY;
-}
+// // Copy LOW_BALANCE_OPENROUTER_API_KEY to CALLAI_API_KEY for this test
+// if (callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY) {
+//   callAiEnv.CALLAI_API_KEY = callAiEnv.LOW_BALANCE_OPENROUTER_API_KEY;
+// }
 
 // Test models based on the OpenRouter documentation
 const supportedModels = {
@@ -22,8 +19,10 @@ const modelEntries = Object.entries(supportedModels);
 
 describe("Low Balance API Key Tests", () => {
   // Skip the entire test suite if no low balance API key is available
-  if (!haveApiKey) {
-    it.skip("Skipping low balance tests - no LOW_BALANCE_OPENROUTER_API_KEY available", () => {});
+  if (!callAiEnv.CALLAI_API_KEY) {
+    it.skip("Skipping low balance tests - no LOW_BALANCE_OPENROUTER_API_KEY available", () => {
+      /* no-op */
+    });
     return;
   }
@@ -32,7 +31,7 @@ describe("Low Balance API Key Tests", () => {
     try {
       // Make API call with skipRefresh flag to ensure we get the low balance error
       await callAi("Provide information about France.", {
-        apiKey: process.env.CALLAI_API_KEY,
+        apiKey: callAiEnv.CALLAI_API_KEY,
         model: modelInfo.id,
         max_tokens: 200000 - 200,
         // When implemented, this will skip the automatic key refresh
@@ -51,7 +50,7 @@ describe("Low Balance API Key Tests", () => {
       });
 
       // If we get here, the key wasn't actually low balance - test should fail
-      fail("Expected key limit exceeded error but got successful result");
+      assert.fail("Expected key limit exceeded error but got successful result");
     } catch (error) {
       // We expect a 403 error with "Key limit exceeded" message
       const errorStr = String(error);
@@ -61,10 +60,7 @@ describe("Low Balance API Key Tests", () => {
       expect(errorStr).toContain("Key limit exceeded");
 
       // Log the error for visibility
-      console.log(
-        "Received expected low balance error:",
-        errorStr.substring(0, 200),
-      );
+      console.log("Received expected low balance error:", errorStr.substring(0, 200));
     }
   });
 });
diff --git a/test/simple.integration.nested-schema.test.ts b/test/integration/simple.integration.nested-schema.test.ts
similarity index 60%
rename from test/simple.integration.nested-schema.test.ts
rename to test/integration/simple.integration.nested-schema.test.ts
index 93d1c70..3c5a7e1 100644
--- a/test/simple.integration.nested-schema.test.ts
+++ b/test/integration/simple.integration.nested-schema.test.ts
@@ -1,18 +1,12 @@
-import { callAi, getMeta } from "../src/index";
-import { Message } from "../src/types";
-import dotenv from "dotenv";
-
-// Load environment variables from .env file if present
-dotenv.config();
-
-// Configure retry settings for flaky tests - use fewer retries with faster failures
-jest.retryTimes(2, { logErrorsBeforeRetry: true });
+import { callAi, getMeta, callAiEnv, Message } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it } from "vitest";
 
 // Increase Jest's default timeout to handle all parallel requests
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.CALLAI_API_KEY;
+const haveApiKey = callAiEnv.CALLAI_API_KEY;
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -33,19 +27,6 @@ const supportedModels = {
 // Define the model names as an array for looping
 const modelEntries = Object.entries(supportedModels);
 
-// Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-) => {
-  if (model.grade === "A") {
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
-
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
   if (!haveApiKey) return it.skip;
@@ -64,20 +45,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
       return result;
     } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
       return;
     }
@@ -106,7 +82,7 @@ describe("Simple callAi integration tests", () => {
         },
       ] as Message[],
       {
-        apiKey: process.env.CALLAI_API_KEY,
+        apiKey: callAiEnv.CALLAI_API_KEY,
         model: modelId.id,
         schema: {
           type: "object",
@@ -157,50 +133,19 @@ describe("Simple callAi integration tests", () => {
       const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but a ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but a ${typeof result} in ${modelName} model`);
 
       // Log raw response information
-      console.log(
-        `Raw response for ${modelId.id}:`,
-        resultMeta?.rawResponse ? "available" : "undefined",
-      );
+      console.log(`Raw response for ${modelId.id}:`, resultMeta?.rawResponse ? "available" : "undefined");
 
       // Verify metadata
-      expectOrWarn(
-        modelId,
-        !!resultMeta,
-        `Metadata should be defined for ${modelName} model`,
-      );
+      expectOrWarn(modelId, !!resultMeta, `Metadata should be defined for ${modelName} model`);
       if (resultMeta) {
-        expectOrWarn(
-          modelId,
-          !!resultMeta.model,
-          `Model should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing,
-          `Timing should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing?.startTime,
-          `Start time should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing?.endTime,
-          `End time should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.rawResponse,
-          `Raw response should be defined in metadata for ${modelName}`,
-        );
+        expectOrWarn(modelId, !!resultMeta.model, `Model should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing, `Timing should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing?.startTime, `Start time should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing?.endTime, `End time should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.rawResponse, `Raw response should be defined in metadata for ${modelName}`);
       }
 
       if (typeof result === "string") {
@@ -214,29 +159,13 @@ describe("Simple callAi integration tests", () => {
 
           if (typeof data === "object" && data !== null) {
             // Check root object
-            expectOrWarn(
-              modelId,
-              "root" in data,
-              `Missing 'root' in ${modelName} model response`,
-            );
+            expectOrWarn(modelId, "root" in data, `Missing 'root' in ${modelName} model response`);
 
             if ("root" in data && typeof data.root === "object") {
               // Check root properties
-              expectOrWarn(
-                modelId,
-                "name" in data.root,
-                `Missing 'root.name' in ${modelName} model response`,
-              );
-              expectOrWarn(
-                modelId,
-                "type" in data.root,
-                `Missing 'root.type' in ${modelName} model response`,
-              );
-              expectOrWarn(
-                modelId,
-                "children" in data.root,
-                `Missing 'root.children' in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, "name" in data.root, `Missing 'root.name' in ${modelName} model response`);
+              expectOrWarn(modelId, "type" in data.root, `Missing 'root.type' in ${modelName} model response`);
+              expectOrWarn(modelId, "children" in data.root, `Missing 'root.children' in ${modelName} model response`);
 
               if ("name" in data.root)
                 expectOrWarn(
@@ -258,33 +187,14 @@ describe("Simple callAi integration tests", () => {
                 );
 
               // Check first level of nesting
-              if (
-                Array.isArray(data.root.children) &&
-                data.root.children.length > 0
-              ) {
+              if (Array.isArray(data.root.children) && data.root.children.length > 0) {
                 const firstChild = data.root.children[0];
-                expectOrWarn(
-                  modelId,
-                  !!firstChild,
-                  `First child is undefined in ${modelName} model response`,
-                );
+                expectOrWarn(modelId, !!firstChild, `First child is undefined in ${modelName} model response`);
 
                 if (firstChild) {
-                  expectOrWarn(
-                    modelId,
-                    !!firstChild.name,
-                    `Missing 'firstChild.name' in ${modelName} model response`,
-                  );
-                  expectOrWarn(
-                    modelId,
-                    !!firstChild.type,
-                    `Missing 'firstChild.type' in ${modelName} model response`,
-                  );
-                  expectOrWarn(
-                    modelId,
-                    !!firstChild.children,
-                    `Missing 'firstChild.children' in ${modelName} model response`,
-                  );
+                  expectOrWarn(modelId, !!firstChild.name, `Missing 'firstChild.name' in ${modelName} model response`);
+                  expectOrWarn(modelId, !!firstChild.type, `Missing 'firstChild.type' in ${modelName} model response`);
+                  expectOrWarn(modelId, !!firstChild.children, `Missing 'firstChild.children' in ${modelName} model response`);
 
                   if (firstChild.name)
                     expectOrWarn(
@@ -306,28 +216,13 @@ describe("Simple callAi integration tests", () => {
                     );
 
                   // Check for at least one file in the second level
-                  if (
-                    Array.isArray(firstChild.children) &&
-                    firstChild.children.length > 0
-                  ) {
+                  if (Array.isArray(firstChild.children) && firstChild.children.length > 0) {
                     const secondChild = firstChild.children[0];
-                    expectOrWarn(
-                      modelId,
-                      !!secondChild,
-                      `Second child is undefined in ${modelName} model response`,
-                    );
+                    expectOrWarn(modelId, !!secondChild, `Second child is undefined in ${modelName} model response`);
 
                     if (secondChild) {
-                      expectOrWarn(
-                        modelId,
-                        !!secondChild.name,
-                        `Missing 'secondChild.name' in ${modelName} model response`,
-                      );
-                      expectOrWarn(
-                        modelId,
-                        !!secondChild.type,
-                        `Missing 'secondChild.type' in ${modelName} model response`,
-                      );
+                      expectOrWarn(modelId, !!secondChild.name, `Missing 'secondChild.name' in ${modelName} model response`);
+                      expectOrWarn(modelId, !!secondChild.type, `Missing 'secondChild.type' in ${modelName} model response`);
 
                       if (secondChild.name)
                         expectOrWarn(
@@ -348,11 +243,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
      }
    }
  },
diff --git a/test/simple.integration.playlist.test.ts b/test/integration/simple.integration.playlist.test.ts
similarity index 62%
rename from test/simple.integration.playlist.test.ts
rename to test/integration/simple.integration.playlist.test.ts
index d153094..452be44 100644
--- a/test/simple.integration.playlist.test.ts
+++ b/test/integration/simple.integration.playlist.test.ts
@@ -1,18 +1,15 @@
-import { callAi, getMeta } from "../src/index";
-import { Message } from "../src/types";
-import dotenv from "dotenv";
-
-// Load environment variables from .env file if present
-dotenv.config();
+import { callAi, getMeta, callAiEnv, Message } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it } from "vitest";
 
 // Configure retry settings for flaky tests - use fewer retries with faster failures
-jest.retryTimes(2, { logErrorsBeforeRetry: true });
+// jest.retryTimes(2, { logErrorsBeforeRetry: true });
 
 // Increase Jest's default timeout to handle all parallel requests
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.CALLAI_API_KEY;
+const haveApiKey = callAiEnv.CALLAI_API_KEY;
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -34,17 +31,6 @@ const supportedModels = {
 const modelEntries = Object.entries(supportedModels);
 
 // Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-) => {
-  if (model.grade === "A") {
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
 
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
@@ -64,20 +50,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
       return result;
     } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
       return;
     }
@@ -103,12 +84,11 @@ describe("Simple callAi integration tests", () => {
       [
         {
           role: "user" as const,
-          content:
-            "Create a themed playlist for a relaxing evening with 3-5 songs.",
+          content: "Create a themed playlist for a relaxing evening with 3-5 songs.",
         },
       ] as Message[],
       {
-        apiKey: process.env.CALLAI_API_KEY,
+        apiKey: callAiEnv.CALLAI_API_KEY,
         model: modelId.id,
         schema: {
           type: "object",
@@ -139,50 +119,19 @@ describe("Simple callAi integration tests", () => {
       const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but ${typeof result} in ${modelName} model`);
 
       // Log raw response information
-      console.log(
-        `Raw response for ${modelId.id}:`,
-        resultMeta?.rawResponse ? "available" : "undefined",
-      );
+      console.log(`Raw response for ${modelId.id}:`, resultMeta?.rawResponse ? "available" : "undefined");
 
       // Verify metadata
-      expectOrWarn(
-        modelId,
-        !!resultMeta,
-        `Metadata should be defined for ${modelName} model`,
-      );
+      expectOrWarn(modelId, !!resultMeta, `Metadata should be defined for ${modelName} model`);
       if (resultMeta) {
-        expectOrWarn(
-          modelId,
-          !!resultMeta.model,
-          `Model should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing,
-          `Timing should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing?.startTime,
-          `Start time should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing?.endTime,
-          `End time should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.rawResponse,
-          `Raw response should be defined in metadata for ${modelName}`,
-        );
+        expectOrWarn(modelId, !!resultMeta.model, `Model should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing, `Timing should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing?.startTime, `Start time should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing?.endTime, `End time should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.rawResponse, `Raw response should be defined in metadata for ${modelName}`);
       }
 
       if (typeof result === "string") {
@@ -196,46 +145,22 @@ describe("Simple callAi integration tests", () => {
 
          if (typeof data === "object" && data !== null) {
            // Check required fields
-            expectOrWarn(
-              modelId,
-              "title" in data,
-              `Missing 'title' in ${modelName} model response`,
-            );
-            expectOrWarn(
-              modelId,
-              "theme" in data,
-              `Missing 'theme' in ${modelName} model response`,
-            );
-            expectOrWarn(
-              modelId,
-              "songs" in data,
-              `Missing 'songs' in ${modelName} model response`,
-            );
+            expectOrWarn(modelId, "title" in data, `Missing 'title' in ${modelName} model response`);
+            expectOrWarn(modelId, "theme" in data, `Missing 'theme' in ${modelName} model response`);
+            expectOrWarn(modelId, "songs" in data, `Missing 'songs' in ${modelName} model response`);
 
             // Check title and theme
             if ("title" in data) {
-              expectOrWarn(
-                modelId,
-                typeof data.title === "string",
-                `'title' is not a string in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, typeof data.title === "string", `'title' is not a string in ${modelName} model response`);
             }
 
             if ("theme" in data) {
-              expectOrWarn(
-                modelId,
-                typeof data.theme === "string",
-                `'theme' is not a string in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, typeof data.theme === "string", `'theme' is not a string in ${modelName} model response`);
             }
 
             // Check songs array
             if ("songs" in data) {
-              expectOrWarn(
-                modelId,
-                Array.isArray(data.songs),
-                `'songs' is not an array in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, Array.isArray(data.songs), `'songs' is not an array in ${modelName} model response`);
 
               if (Array.isArray(data.songs)) {
                 expectOrWarn(
@@ -255,11 +180,7 @@ describe("Simple callAi integration tests", () => {
 
                 if (typeof firstSong === "object" && firstSong !== null) {
                   // Check required properties
-                  expectOrWarn(
-                    modelId,
-                    "title" in firstSong,
-                    `Missing 'title' in first song in ${modelName} model response`,
-                  );
+                  expectOrWarn(modelId, "title" in firstSong, `Missing 'title' in first song in ${modelName} model response`);
                   expectOrWarn(
                     modelId,
                     "artist" in firstSong,
@@ -288,11 +209,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
      }
    }
  },
diff --git a/test/simple.integration.recipes.test.ts b/test/integration/simple.integration.recipes.test.ts
similarity index 63%
rename from test/simple.integration.recipes.test.ts
rename to test/integration/simple.integration.recipes.test.ts
index 2830ea1..6d12036 100644
--- a/test/simple.integration.recipes.test.ts
+++ b/test/integration/simple.integration.recipes.test.ts
@@ -1,18 +1,17 @@
-import { callAi, getMeta } from "../src/index";
-// import { Message } from "../src/types";
-import dotenv from "dotenv";
+import { callAi, getMeta, callAiEnv } from "call-ai";
+import { expectOrWarn } from "../test-helper.js";
+import { describe, it } from "vitest";
 
-// Load environment variables from .env file if present
-dotenv.config();
+// import { Message } from "../src/types";
 
 // Configure retry settings for flaky tests - use fewer retries with faster failures
-jest.retryTimes(2, { logErrorsBeforeRetry: true });
+// jest.retryTimes(2, { logErrorsBeforeRetry: true });
 
 // Increase Jest's default timeout to handle all parallel requests
 // jest.setTimeout(60000);
 
 // Skip tests if no API key is available
-const haveApiKey = process.env.CALLAI_API_KEY;
+const haveApiKey = callAiEnv.CALLAI_API_KEY;
 // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip);
 
 // Timeout for individual test
@@ -33,19 +32,6 @@ const supportedModels = {
 // Define the model names as an array for looping
 const modelEntries = Object.entries(supportedModels);
 
-// Function to handle test expectations based on model grade
-const expectOrWarn = (
-  model: { id: string; grade: string },
-  condition: boolean,
-  message: string,
-) => {
-  if (model.grade === "A") {
-    expect(condition).toBe(true);
-  } else if (!condition) {
-    console.warn(`Warning (${model.id}): ${message}`);
-  }
-};
-
 // Create a test function that won't fail on timeouts for B and C grade models
 const gradeAwareTest = (modelId: { id: string; grade: string }) => {
   if (!haveApiKey) return it.skip;
@@ -64,20 +50,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => {
         fn(),
         new Promise((resolve) =>
           setTimeout(() => {
-            console.warn(
-              `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`,
-            );
+            console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`);
             resolve(undefined);
           }, timeout || TIMEOUT),
         ),
       ]);
       return result;
     } catch (error: unknown) {
-      const errorMessage =
-        error instanceof Error ? error.message : String(error);
-      console.warn(
-        `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`,
-      );
+      const errorMessage = error instanceof Error ? error.message : String(error);
+      console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`);
       // Don't fail the test
       return;
     }
@@ -100,7 +81,7 @@ describe("Simple callAi integration tests", () => {
     async () => {
       // Make API call with a recipe schema
       const result = await callAi("Create a recipe for a healthy dinner.", {
-        apiKey: process.env.CALLAI_API_KEY,
+        apiKey: callAiEnv.CALLAI_API_KEY,
         model: modelId.id,
         schema: {
           type: "object",
@@ -126,15 +107,7 @@ describe("Simple callAi integration tests", () => {
             cook_time_minutes: { type: "number" },
             servings: { type: "number" },
           },
-          required: [
-            "title",
-            "description",
-            "ingredients",
-            "steps",
-            "prep_time_minutes",
-            "cook_time_minutes",
-            "servings",
-          ],
+          required: ["title", "description", "ingredients", "steps", "prep_time_minutes", "cook_time_minutes", "servings"],
         },
       });
 
@@ -142,44 +115,16 @@ describe("Simple callAi integration tests", () => {
       const resultMeta = getMeta(result);
 
       // Verify response
-      expectOrWarn(
-        modelId,
-        typeof result === "string",
-        `Result is not a string but ${typeof result} in ${modelName} model`,
-      );
+      expectOrWarn(modelId, typeof result === "string", `Result is not a string but ${typeof result} in ${modelName} model`);
 
       // Verify metadata
-      expectOrWarn(
-        modelId,
-        !!resultMeta,
-        `Metadata should be defined for ${modelName} model`,
-      );
+      expectOrWarn(modelId, !!resultMeta, `Metadata should be defined for ${modelName} model`);
       if (resultMeta) {
-        expectOrWarn(
-          modelId,
-          !!resultMeta.model,
-          `Model should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing,
-          `Timing should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing?.startTime,
-          `Start time should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.timing?.endTime,
-          `End time should be defined in metadata for ${modelName}`,
-        );
-        expectOrWarn(
-          modelId,
-          !!resultMeta.rawResponse,
-          `Raw response should be defined in metadata for ${modelName}`,
-        );
+        expectOrWarn(modelId, !!resultMeta.model, `Model should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing, `Timing should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing?.startTime, `Start time should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.timing?.endTime, `End time should be defined in metadata for ${modelName}`);
+        expectOrWarn(modelId, !!resultMeta.rawResponse, `Raw response should be defined in metadata for ${modelName}`);
       }
 
       if (typeof result === "string") {
@@ -204,26 +149,14 @@ describe("Simple callAi integration tests", () => {
           ];
 
           for (const field of requiredFields) {
-            expectOrWarn(
-              modelId,
-              field in data,
-              `Missing '${field}' in ${modelName} model response`,
-            );
+            expectOrWarn(modelId, field in data, `Missing '${field}' in ${modelName} model response`);
           }
 
           // Validate types and some basic content
           if ("title" in data) {
-            expectOrWarn(
-              modelId,
-              typeof data.title === "string",
-              `'title' is not a string in ${modelName} model response`,
-            );
+            expectOrWarn(modelId, typeof data.title === "string", `'title' is not a string in ${modelName} model response`);
             if (typeof data.title === "string") {
-              expectOrWarn(
-                modelId,
-                data.title.length > 3,
-                `Title too short in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, data.title.length > 3, `Title too short in ${modelName} model response`);
             }
           }
 
@@ -234,11 +167,7 @@ describe("Simple callAi integration tests", () => {
              `'description' is not a string in ${modelName} model response`,
            );
            if (typeof data.description === "string") {
-              expectOrWarn(
-                modelId,
-                data.description.length > 10,
-                `Description too short in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, data.description.length > 10, `Description too short in ${modelName} model response`);
            }
          }
 
@@ -249,26 +178,18 @@ describe("Simple callAi integration tests", () => {
              `'ingredients' is not an array in ${modelName} model response`,
            );
            if (Array.isArray(data.ingredients)) {
-              expectOrWarn(
-                modelId,
-                data.ingredients.length > 0,
-                `No ingredients in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, data.ingredients.length > 0, `No ingredients in ${modelName} model response`);
 
              // Check first ingredient
              if (data.ingredients.length > 0) {
                const firstIngredient = data.ingredients[0];
                expectOrWarn(
                  modelId,
-                  typeof firstIngredient === "object" &&
-                    firstIngredient !== null,
+                  typeof firstIngredient === "object" && firstIngredient !== null,
                  `First ingredient is not an object in ${modelName} model response`,
                );
 
-                if (
-                  typeof firstIngredient === "object" &&
-                  firstIngredient !== null
-                ) {
+                if (typeof firstIngredient === "object" && firstIngredient !== null) {
                  expectOrWarn(
                    modelId,
                    "name" in firstIngredient,
@@ -301,17 +222,9 @@ describe("Simple callAi integration tests", () => {
          }
 
          if ("steps" in data) {
-            expectOrWarn(
-              modelId,
-              Array.isArray(data.steps),
-              `'steps' is not an array in ${modelName} model response`,
-            );
+            expectOrWarn(modelId, Array.isArray(data.steps), `'steps' is not an array in ${modelName} model response`);
            if (Array.isArray(data.steps)) {
-              expectOrWarn(
-                modelId,
-                data.steps.length > 0,
-                `No steps in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, data.steps.length > 0, `No steps in ${modelName} model response`);
 
              // Check first step
              if (data.steps.length > 0) {
@@ -325,11 +238,7 @@ describe("Simple callAi integration tests", () => {
          }
 
          // Check numeric fields
-          const numericFields = [
-            "prep_time_minutes",
-            "cook_time_minutes",
-            "servings",
-          ];
+          const numericFields = ["prep_time_minutes", "cook_time_minutes", "servings"];
          for (const field of numericFields) {
            if (field in data) {
              expectOrWarn(
@@ -338,21 +247,13 @@ describe("Simple callAi integration tests", () => {
                `'${field}' is not a number in ${modelName} model response`,
              );
              if (typeof data[field] === "number") {
-                expectOrWarn(
-                  modelId,
-                  data[field] > 0,
-                  `'${field}' is not positive in ${modelName} model response`,
-                );
+                expectOrWarn(modelId, data[field] > 0, `'${field}' is not positive in ${modelName} model response`);
              }
            }
          }
        }
      } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
      }
    }
  },
diff --git a/test/simple.integration.streaming.test.ts b/test/integration/simple.integration.streaming.test.ts
similarity index 70%
rename from test/simple.integration.streaming.test.ts
rename to test/integration/simple.integration.streaming.test.ts
index 6e61fba..02246e0 100644
--- a/test/simple.integration.streaming.test.ts
+++ b/test/integration/simple.integration.streaming.test.ts
@@ -1,18 +1,14 @@
-import { callAi } from "../src/index";
-// import { Message } from "../src/types";
-import dotenv from "dotenv";
-
-// Load environment variables from .env file if present
-dotenv.config();
+import { callAi, callAiEnv } from "call-ai";
+import { expectOrWarn } from "../test-helper.js"; +import { describe, it } from "vitest"; // Configure retry settings for flaky tests - use fewer retries with faster failures -jest.retryTimes(2, { logErrorsBeforeRetry: true }); // Increase Jest's default timeout to handle all parallel requests // jest.setTimeout(60000); // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; +const haveApiKey = callAiEnv.CALLAI_API_KEY; // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip); // Timeout for individual test @@ -33,19 +29,6 @@ const supportedModels = { // Define the model names as an array for looping const modelEntries = Object.entries(supportedModels); -// Function to handle test expectations based on model grade -const expectOrWarn = ( - model: { id: string; grade: string }, - condition: boolean, - message: string, -) => { - if (model.grade === "A") { - expect(condition).toBe(true); - } else if (!condition) { - console.warn(`Warning (${model.id}): ${message}`); - } -}; - // Create a test function that won't fail on timeouts for B and C grade models const gradeAwareTest = (modelId: { id: string; grade: string }) => { if (!haveApiKey) return it.skip; @@ -64,20 +47,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => { fn(), new Promise((resolve) => setTimeout(() => { - console.warn( - `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`, - ); + console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`); resolve(undefined); }, timeout || TIMEOUT), ), ]); return result; } catch (error: unknown) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.warn( - `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`, - ); + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`); // Don't fail the test return; } @@ -98,14 +76,11 @@ describe("Simple callAi integration tests", () => { `should generate text with ${modelName} model with streaming`, async () => { // Make a simple non-structured API call with streaming - const generator = await callAi( - "Write a short joke about programming.", - { - apiKey: process.env.CALLAI_API_KEY, - model: modelId.id, - stream: true, - }, - ); + const generator = await callAi("Write a short joke about programming.", { + apiKey: callAiEnv.CALLAI_API_KEY, + model: modelId.id, + stream: true, + }); // Get the metadata for the streaming response // const resultMeta = getMeta(generator); @@ -119,11 +94,7 @@ describe("Simple callAi integration tests", () => { // Manual type assertion to help TypeScript recognize generator as AsyncGenerator if (typeof generator === "object" && generator !== null) { - const asyncGenerator = generator as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as AsyncGenerator; // Collect all chunks let finalResult = ""; @@ -146,13 +117,8 @@ describe("Simple callAi integration tests", () => { ); } catch (error) { // Log error but don't fail test for B/C grade models - const errorMessage = - error instanceof Error ? error.message : String(error); - expectOrWarn( - modelId, - false, - `Streaming error in ${modelName} model: ${errorMessage}`, - ); + const errorMessage = error instanceof Error ? 
error.message : String(error); + expectOrWarn(modelId, false, `Streaming error in ${modelName} model: ${errorMessage}`); } } }, diff --git a/test/simple.integration.system.test.ts b/test/integration/simple.integration.system.test.ts similarity index 71% rename from test/simple.integration.system.test.ts rename to test/integration/simple.integration.system.test.ts index 0a4f304..b1df70d 100644 --- a/test/simple.integration.system.test.ts +++ b/test/integration/simple.integration.system.test.ts @@ -1,18 +1,15 @@ -import { callAi, getMeta } from "../src/index"; -import { Message } from "../src/types"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); +import { callAi, getMeta, callAiEnv, Message } from "call-ai"; +import { expectOrWarn } from "../test-helper.js"; +import { describe, expect, it } from "vitest"; // Configure retry settings for flaky tests - use fewer retries with faster failures -jest.retryTimes(2, { logErrorsBeforeRetry: true }); +// jest.retryTimes(2, { logErrorsBeforeRetry: true }); // Increase Jest's default timeout to handle all parallel requests // jest.setTimeout(60000); // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; +const haveApiKey = callAiEnv.CALLAI_API_KEY; // const itif = (condition: boolean) => (condition ? it.concurrent : it.skip); // Timeout for individual test @@ -33,19 +30,6 @@ const supportedModels = { // Define the model names as an array for looping const modelEntries = Object.entries(supportedModels); -// Function to handle test expectations based on model grade -const expectOrWarn = ( - model: { id: string; grade: string }, - condition: boolean, - message: string, -) => { - if (model.grade === "A") { - expect(condition).toBe(true); - } else if (!condition) { - console.warn(`Warning (${model.id}): ${message}`); - } -}; - // Create a test function that won't fail on timeouts for B and C grade models const gradeAwareTest = (modelId: { id: string; grade: string }) => { if (!haveApiKey) return it.skip; @@ -64,20 +48,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => { fn(), new Promise((resolve) => setTimeout(() => { - console.warn( - `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`, - ); + console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`); resolve(undefined); }, timeout || TIMEOUT), ), ]); return result; } catch (error: unknown) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.warn( - `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`, - ); + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`); // Don't fail the test return; } @@ -101,8 +80,7 @@ describe("Simple callAi integration tests", () => { const messages = [ { role: "system" as const, - content: - "You are a helpful assistant that provides only factual information.", + content: "You are a helpful assistant that provides only factual information.", }, { role: "user" as const, @@ -112,7 +90,7 @@ describe("Simple callAi integration tests", () => { // Make API call with message array const result = await callAi(messages, { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, }); @@ -120,17 +98,9 @@ describe("Simple callAi integration tests", () => { const resultMeta = getMeta(result); // Verify response - expectOrWarn( - modelId, - typeof result === "string", - `Result is not a string but a ${typeof result} in ${modelName} model`, - ); + expectOrWarn(modelId, typeof result === "string", `Result is not a string but a ${typeof result} in ${modelName} model`); if (typeof result === "string") { - expectOrWarn( - modelId, - result.length > 50, - `Result length (${result.length}) too short in ${modelName} model`, - ); + expectOrWarn(modelId, result.length > 50, `Result length (${result.length}) too short in ${modelName} model`); // Should mention France somewhere in the response expectOrWarn( modelId, diff --git a/test/simple.integration.test.ts b/test/integration/simple.integration.test.ts similarity index 72% rename from test/simple.integration.test.ts rename to test/integration/simple.integration.test.ts index a09cac3..deb0d1d 100644 --- a/test/simple.integration.test.ts +++ b/test/integration/simple.integration.test.ts @@ -1,18 +1,15 @@ -import { callAi, getMeta } from "../src/index"; -// import { Message } from "../src/types"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); +import { callAi, getMeta, callAiEnv } from "call-ai"; +import { expectOrWarn } from "../test-helper.js"; +import { describe, expect, it } from "vitest"; // Configure retry settings for flaky tests - use fewer retries with faster failures -jest.retryTimes(2, { logErrorsBeforeRetry: true }); +// jest.retryTimes(2, { logErrorsBeforeRetry: true }); // Increase Jest's default timeout to handle all parallel requests // jest.setTimeout(60000); // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; +const haveApiKey = callAiEnv.CALLAI_API_KEY; // const itif = (condition: boolean) => (condition ? 
it.concurrent : it.skip); // Timeout for individual test @@ -33,19 +30,6 @@ const supportedModels = { // Define the model names as an array for looping const modelEntries = Object.entries(supportedModels); -// Function to handle test expectations based on model grade -const expectOrWarn = ( - model: { id: string; grade: string }, - condition: boolean, - message: string, -) => { - if (model.grade === "A") { - expect(condition).toBe(true); - } else if (!condition) { - console.warn(`Warning (${model.id}): ${message}`); - } -}; - // Create a test function that won't fail on timeouts for B and C grade models const gradeAwareTest = (modelId: { id: string; grade: string }) => { if (!haveApiKey) return it.skip; @@ -64,20 +48,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => { fn(), new Promise((resolve) => setTimeout(() => { - console.warn( - `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`, - ); + console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`); resolve(undefined); }, timeout || TIMEOUT), ), ]); return result; } catch (error: unknown) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.warn( - `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`, - ); + const errorMessage = error instanceof Error ? error.message : String(error); + console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`); // Don't fail the test return; } @@ -99,7 +78,7 @@ describe("Simple callAi integration tests", () => { async () => { // Make a simple non-structured API call const result = await callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, }); @@ -107,11 +86,7 @@ describe("Simple callAi integration tests", () => { const resultMeta = getMeta(result); // Verify response - expectOrWarn( - modelId, - !!result, - `should generate text with ${modelName} model without streaming`, - ); + expectOrWarn(modelId, !!result, `should generate text with ${modelName} model without streaming`); expect(typeof result).toBe("string"); expect((result as string).length).toBeGreaterThan(10); @@ -121,9 +96,7 @@ describe("Simple callAi integration tests", () => { expect(resultMeta?.timing).toBeDefined(); expect(resultMeta?.timing?.startTime).toBeDefined(); expect(resultMeta?.timing?.endTime).toBeDefined(); - expect(resultMeta?.timing?.startTime).toBeLessThanOrEqual( - resultMeta?.timing?.endTime as number, - ); + expect(resultMeta?.timing?.startTime).toBeLessThanOrEqual(resultMeta?.timing?.endTime as number); expect(resultMeta?.rawResponse).toBeDefined(); }, TIMEOUT, diff --git a/test/integration/vitest.config.ts b/test/integration/vitest.config.ts new file mode 100644 index 0000000..19460df --- /dev/null +++ b/test/integration/vitest.config.ts @@ -0,0 +1,9 @@ +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + retry: 2, + name: "integration", + include: ["*test.?(c|m)[jt]s?(x)"], + }, +}); diff --git a/test/make-curl-request.js b/test/make-curl-request.ts similarity index 58% rename from test/make-curl-request.js rename to test/make-curl-request.ts index faafe02..802a4c9 100644 --- a/test/make-curl-request.js +++ b/test/make-curl-request.ts @@ -1,19 +1,19 @@ // Script to make a curl request using API key from .env -const fs = require('fs'); -const { execSync } = require('child_process'); -require('dotenv').config(); +import * as fs from "node:fs"; +import { execSync } from 
"child_process"; +import * as process from "node:process"; // Get API key from environment, trying both variables const apiKey = process.env.CALLAI_API_KEY || process.env.OPENROUTER_API_KEY; if (!apiKey) { - console.error('Error: No API key found. Please set CALLAI_API_KEY or OPENROUTER_API_KEY in your .env file.'); + console.error("Error: No API key found. Please set CALLAI_API_KEY or OPENROUTER_API_KEY in your .env file."); process.exit(1); } // Read the request file -const requestFile = process.argv[2] || 'test/fixtures/claude-tool-request.json'; -const responseFile = process.argv[3] || 'test/fixtures/claude-tool-response.json'; +const requestFile = process.argv[2] || "test/fixtures/claude-tool-request.json"; +const responseFile = process.argv[3] || "test/fixtures/claude-tool-response.json"; console.log(`Making request using ${requestFile} and saving to ${responseFile}`); @@ -23,13 +23,13 @@ const curlCmd = `curl -X POST "https://openrouter.ai/api/v1/chat/completions" -H try { // Execute curl command and capture output const output = execSync(curlCmd).toString(); - + // Save response to file fs.writeFileSync(responseFile, output); - - console.log('Response saved to', responseFile); - console.log('Response preview:'); - console.log(output.substring(0, 500) + (output.length > 500 ? '...' : '')); + + console.log("Response saved to", responseFile); + console.log("Response preview:"); + console.log(output.substring(0, 500) + (output.length > 500 ? "..." : "")); } catch (error) { - console.error('Error executing curl command:', error.message); -} \ No newline at end of file + console.error("Error executing curl command:", (error as Error).message); +} diff --git a/test/package.json b/test/package.json new file mode 100644 index 0000000..6f7f9fa --- /dev/null +++ b/test/package.json @@ -0,0 +1,44 @@ +{ + "name": "call-ai-test", + "version": "0.0.0", + "private": true, + "description": "Lightweight library for making AI API calls with streaming support", + "repository": { + "type": "git", + "url": "https://github.com/fireproof-storage/call-ai.git" + }, + "homepage": "https://github.com/fireproof-storage/call-ai", + "bugs": { + "url": "https://github.com/fireproof-storage/call-ai/issues" + }, + "scripts": { + "build": "tsc", + "test": "vitest --run", + "test:integration": "vitest simple.integration", + "test:all": "pnpm test && pnpm test:integration" + }, + "keywords": [ + "ai", + "llm", + "api", + "call", + "openai", + "streaming", + "openrouter" + ], + "contributors": [ + "J Chris Anderson", + "Meno Abels" + ], + "license": "Apache-2.0", + "devDependencies": { + "@types/node": "^24.0.15", + "typescript": "^5.8.3" + }, + "engines": { + "node": ">=20.0.0" + }, + "dependencies": { + "call-ai": "workspace:0.0.0" + } +} diff --git a/test/simple.claude.test.ts b/test/simple.claude.test.ts index 8fcd207..718e06d 100644 --- a/test/simple.claude.test.ts +++ b/test/simple.claude.test.ts @@ -1,11 +1,12 @@ -import { callAi } from "../src/index"; +import { vitest, describe, it, expect, beforeEach, Mock } from "vitest"; +import { callAi } from "call-ai"; // Mock global fetch -global.fetch = jest.fn(); +global.fetch = vitest.fn(); // Simple mock for TextDecoder -global.TextDecoder = jest.fn().mockImplementation(() => ({ - decode: jest.fn((value) => { +global.TextDecoder = vitest.fn().mockImplementation(() => ({ + decode: vitest.fn((value) => { // Basic mock implementation without recursion if (value instanceof Uint8Array) { // Convert the Uint8Array to a simple string @@ -19,7 +20,7 @@ global.TextDecoder = 
jest.fn().mockImplementation(() => ({ describe("Claude JSON Property Splitting Test", () => { beforeEach(() => { - jest.clearAllMocks(); + vitest.clearAllMocks(); }); it("should handle property name splitting across chunks", async () => { @@ -41,26 +42,26 @@ describe("Claude JSON Property Splitting Test", () => { // Create a simple mock that focuses on the specific property splitting issue const mockResponse = { + clone: () => mockResponse, ok: true, status: 200, headers: { - forEach: jest.fn(), + forEach: vitest.fn(), }, body: { - getReader: jest.fn().mockReturnValue({ - read: jest + getReader: vitest.fn().mockReturnValue({ + read: vitest .fn() // Streaming setup chunk .mockResolvedValueOnce({ done: false, - value: new TextEncoder().encode( - `data: {"type":"message_start"}\n\n`, - ), + value: new TextEncoder().encode(`data: {"type":"message_start"}\n\n`), }) // First part with split property "popul" .mockResolvedValueOnce({ done: false, value: new TextEncoder().encode( + // eslint-disable-next-line no-useless-escape `data: {"type":"content_block_delta","delta":{"text":"{\\\"capital\\\":\\\"Paris\\\", \\\"popul"}}\n\n`, ), }) @@ -68,15 +69,14 @@ describe("Claude JSON Property Splitting Test", () => { .mockResolvedValueOnce({ done: false, value: new TextEncoder().encode( + // eslint-disable-next-line no-useless-escape `data: {"type":"content_block_delta","delta":{"text":"ation\\\":67.5, \\\"languages\\\":[\\\"French\\\"]}"}}\n\n`, ), }) // Final chunk with tool_calls completion signal .mockResolvedValueOnce({ done: false, - value: new TextEncoder().encode( - `data: {"type":"message_delta","delta":{"stop_reason":"tool_calls"}}\n\n`, - ), + value: new TextEncoder().encode(`data: {"type":"message_delta","delta":{"stop_reason":"tool_calls"}}\n\n`), }) // End of stream .mockResolvedValueOnce({ @@ -87,12 +87,9 @@ describe("Claude JSON Property Splitting Test", () => { }; // Override global.fetch mock for this test - (global.fetch as jest.Mock).mockResolvedValueOnce(mockResponse); + (global.fetch as Mock).mockResolvedValueOnce(mockResponse); - const generator = (await callAi( - "Provide information about France.", - options, - )) as AsyncGenerator; + const generator = (await callAi("Provide information about France.", options)) as AsyncGenerator; // The expected final parsed result const expectedResult = { diff --git a/test/test-helper.ts b/test/test-helper.ts new file mode 100644 index 0000000..69c2c55 --- /dev/null +++ b/test/test-helper.ts @@ -0,0 +1,34 @@ +import { it, expect, TestAPI } from "vitest"; + +export function itif(condition: boolean): TestAPI | TestAPI["skip"] { + return condition ? it : it.skip; +} + +// Function to handle test expectations based on model grade +export function expectOrWarn( + model: { id: string; grade: string }, + condition: boolean, + message: string, + debugValue?: unknown, // Added optional debug value parameter +) { + if (model.grade === "A") { + if (!condition) { + // Enhanced debug logging for failures + console.log(`DETAILED FAILURE for ${model.id}: ${message}`); + if (debugValue !== undefined) { + console.log("Debug value:", typeof debugValue === "object" ? 
JSON.stringify(debugValue, null, 2) : debugValue); + } + } + expect(condition).toBe(true); + } else if (!condition) { + console.warn(`Warning (${model.id}): ${message}`); + } +} + +export function entriesHeaders(headers: Headers) { + const entries: [string, string][] = []; + headers.forEach((value, key) => { + entries.push([key, value]); + }); + return entries; +} diff --git a/test/callai-vision.integration.no-await.test.ts b/test/unit/callai-vision.integration.no-await.test.ts similarity index 84% rename from test/callai-vision.integration.no-await.test.ts rename to test/unit/callai-vision.integration.no-await.test.ts index ca98164..cca0500 100644 --- a/test/callai-vision.integration.no-await.test.ts +++ b/test/unit/callai-vision.integration.no-await.test.ts @@ -1,14 +1,11 @@ -import { callAi, ContentItem } from "../src/index"; -import dotenv from "dotenv"; +import { callAi, ContentItem, callAiEnv } from "call-ai"; import fs from "fs"; import path from "path"; - -// Load environment variables from .env file if present -dotenv.config(); +import { itif } from "../test-helper.js"; +import { describe, expect } from "vitest"; // Skip tests if no API key is available -const haveApiKey = process.env.OPENROUTER_API_KEY || process.env.CALLAI_API_KEY; -const itif = (condition: boolean) => (condition ? it : it.skip); +const haveApiKey = callAiEnv.CALLAI_API_KEY; // Timeout for image recognition tests const TIMEOUT = 30000; @@ -46,7 +43,7 @@ describe("Call-AI Vision Recognition", () => { try { // Call the callAi function with the vision model const result = await callAi([{ role: "user", content }], { - apiKey: process.env.OPENROUTER_API_KEY || process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "openai/gpt-4o-2024-08-06", }); diff --git a/test/callai-vision.integration.test.ts b/test/unit/callai-vision.integration.test.ts similarity index 84% rename from test/callai-vision.integration.test.ts rename to test/unit/callai-vision.integration.test.ts index ca98164..cca0500 100644 --- a/test/callai-vision.integration.test.ts +++ b/test/unit/callai-vision.integration.test.ts @@ -1,14 +1,11 @@ -import { callAi, ContentItem } from "../src/index"; -import dotenv from "dotenv"; +import { callAi, ContentItem, callAiEnv } from "call-ai"; import fs from "fs"; import path from "path"; - -// Load environment variables from .env file if present -dotenv.config(); +import { itif } from "../test-helper.js"; +import { describe, expect } from "vitest"; // Skip tests if no API key is available -const haveApiKey = process.env.OPENROUTER_API_KEY || process.env.CALLAI_API_KEY; -const itif = (condition: boolean) => (condition ? 
it : it.skip); +const haveApiKey = callAiEnv.CALLAI_API_KEY; // Timeout for image recognition tests const TIMEOUT = 30000; @@ -46,7 +43,7 @@ describe("Call-AI Vision Recognition", () => { try { // Call the callAi function with the vision model const result = await callAi([{ role: "user", content }], { - apiKey: process.env.OPENROUTER_API_KEY || process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "openai/gpt-4o-2024-08-06", }); diff --git a/test/deepseek-wire.test.ts b/test/unit/deepseek-wire.test.ts similarity index 69% rename from test/deepseek-wire.test.ts rename to test/unit/deepseek-wire.test.ts index b4610bf..fbc1299 100644 --- a/test/deepseek-wire.test.ts +++ b/test/unit/deepseek-wire.test.ts @@ -1,42 +1,37 @@ import fs from "fs"; import path from "path"; -import { callAi, Schema, Message } from "../src/index"; +import { callAi, Schema, Message } from "call-ai"; +import { Mock, vitest, expect, describe, it, beforeEach } from "vitest"; // Mock fetch to use our fixture files -global.fetch = jest.fn(); +global.fetch = vitest.fn(); describe("DeepSeek Wire Protocol Tests", () => { // Read fixtures - const deepseekRequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/deepseek-request.json"), - "utf8", - ), - ); - - const deepseekResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/deepseek-response.json"), - "utf8", - ); - - const deepseekSystemRequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/deepseek-system-request.json"), - "utf8", - ), - ); - - const deepseekSystemResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/deepseek-system-response.json"), - "utf8", - ); + // const deepseekRequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/deepseek-request.json"), + // "utf8", + // ), + // ); + + const deepseekResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/deepseek-response.json"), "utf8"); + + // const deepseekSystemRequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/deepseek-system-request.json"), + // "utf8", + // ), + // ); + + const deepseekSystemResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/deepseek-system-response.json"), "utf8"); beforeEach(() => { // Reset mocks - (global.fetch as jest.Mock).mockClear(); + (global.fetch as Mock).mockClear(); // Mock successful response - (global.fetch as jest.Mock).mockImplementation(async (url, options) => { + (global.fetch as Mock).mockImplementation(async () => { return { ok: true, status: 200, @@ -60,31 +55,24 @@ describe("DeepSeek Wire Protocol Tests", () => { }; // Call the library function with the schema - await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "deepseek/deepseek-chat", - schema: schema, - }, - ); + await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "deepseek/deepseek-chat", + schema: schema, + }); // Verify fetch was called expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Check that we're using system message approach rather than JSON schema format expect(actualRequestBody.messages).toBeTruthy(); expect(actualRequestBody.messages.length).toBeGreaterThan(1); // Check 
for system message with schema info - const systemMessage = actualRequestBody.messages.find( - (m: any) => m.role === "system", - ); + const systemMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "system"); expect(systemMessage).toBeTruthy(); expect(systemMessage.content).toContain("title"); expect(systemMessage.content).toContain("author"); @@ -93,13 +81,9 @@ describe("DeepSeek Wire Protocol Tests", () => { expect(systemMessage.content).toContain("rating"); // Verify user message is included - const userMessage = actualRequestBody.messages.find( - (m: any) => m.role === "user", - ); + const userMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "user"); expect(userMessage).toBeTruthy(); - expect(userMessage.content).toBe( - "Give me a short book recommendation in the requested format.", - ); + expect(userMessage.content).toBe("Give me a short book recommendation in the requested format."); // Verify response_format is not used expect(actualRequestBody.response_format).toBeUndefined(); @@ -107,7 +91,7 @@ describe("DeepSeek Wire Protocol Tests", () => { it("should correctly handle DeepSeek response with schema", async () => { // Update mock to return proper response - (global.fetch as jest.Mock).mockImplementationOnce(async (url, options) => { + (global.fetch as Mock).mockImplementationOnce(async () => { return { ok: true, status: 200, @@ -129,18 +113,15 @@ describe("DeepSeek Wire Protocol Tests", () => { }; // Call the library with DeepSeek model - const result = await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "deepseek/deepseek-chat", - schema: schema, - }, - ); + const result = await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "deepseek/deepseek-chat", + schema: schema, + }); // Parse the DeepSeek response fixture to get expected content - const responseObj = JSON.parse(deepseekResponseFixture); - const responseContent = responseObj.choices[0].message.content; + // const responseObj = JSON.parse(deepseekResponseFixture); + // const responseContent = responseObj.choices[0].message.content; // Verify the result expect(result).toBeTruthy(); @@ -156,7 +137,7 @@ describe("DeepSeek Wire Protocol Tests", () => { it("should handle system message approach with DeepSeek", async () => { // Update mock to return system message response - (global.fetch as jest.Mock).mockImplementationOnce(async (url, options) => { + (global.fetch as Mock).mockImplementationOnce(async () => { return { ok: true, status: 200, @@ -175,8 +156,7 @@ describe("DeepSeek Wire Protocol Tests", () => { }, { role: "user", - content: - "Give me a short book recommendation. Respond with only valid JSON matching the schema.", + content: "Give me a short book recommendation. 
Respond with only valid JSON matching the schema.", }, ] as Message[], { diff --git a/test/errors.integration.no-await.test.ts b/test/unit/errors.integration.no-await.test.ts similarity index 84% rename from test/errors.integration.no-await.test.ts rename to test/unit/errors.integration.no-await.test.ts index 35797df..20a0bb5 100644 --- a/test/errors.integration.no-await.test.ts +++ b/test/unit/errors.integration.no-await.test.ts @@ -1,14 +1,10 @@ -import { callAi } from "../src/index"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); +import { callAi, callAiEnv } from "call-ai"; +import { vitest, describe, it, expect, assert } from "vitest"; // Configure retry settings for flaky tests -jest.retryTimes(2, { logErrorsBeforeRetry: true }); // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; +const haveApiKey = callAiEnv.CALLAI_API_KEY; const itif = (condition: boolean) => (condition ? it : it.skip); // Timeout for individual test @@ -21,7 +17,7 @@ describe("Error handling integration tests", () => { async () => { // Make a simple API call with no model specified const result = await callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, // No model specified - should use default }); @@ -39,7 +35,7 @@ describe("Error handling integration tests", () => { // Attempt API call with a non-existent model await expect(async () => { await callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "fake-model-that-does-not-exist", skipRetry: true, // Skip retry mechanism to force the error }); @@ -55,7 +51,7 @@ describe("Error handling integration tests", () => { // Attempt streaming API call with a non-existent model await expect(async () => { const generator = callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "fake-model-that-does-not-exist", stream: true, skipRetry: true, // Skip retry mechanism to force the error @@ -63,11 +59,8 @@ describe("Error handling integration tests", () => { // Try to consume the generator // Cast to AsyncGenerator to ensure TypeScript recognizes it properly - const asyncGenerator = generator as unknown as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as unknown as AsyncGenerator; + // eslint-disable-next-line @typescript-eslint/no-unused-vars for await (const _ of asyncGenerator) { // This should throw before yielding any chunks } @@ -85,12 +78,12 @@ describe("Error handling integration tests", () => { // Attempt API call with a non-existent model try { await callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: fakeModelId, skipRetry: true, // Skip retry mechanism to force the error }); // If we get here, fail the test - fail("Should have thrown an error"); + assert.fail("Should have thrown an error"); } catch (error) { // Verify error message contains useful information expect(error instanceof Error).toBe(true); @@ -99,7 +92,7 @@ describe("Error handling integration tests", () => { expect(error.message).toContain("HTTP error"); expect(error.message).toContain("400"); // Bad Request status code } else { - fail("Error is not an Error instance"); + assert.fail("Error is not an Error instance"); } } }, @@ -111,18 +104,18 @@ describe("Error 
handling integration tests", () => { "should handle error with debug option", async () => { // Spy on console.error - const consoleErrorSpy = jest.spyOn(console, "error"); + const consoleErrorSpy = vitest.spyOn(console, "error"); // Attempt API call with a non-existent model and debug enabled try { await callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "fake-model-that-does-not-exist", skipRetry: true, // Skip retry mechanism to force the error debug: true, // Enable debug mode }); // If we get here, fail the test - fail("Should have thrown an error"); + assert.fail("Should have thrown an error"); } catch (error) { // Verify console.error was called at least once (debug mode) expect(consoleErrorSpy).toHaveBeenCalled(); @@ -143,7 +136,7 @@ describe("Error handling integration tests", () => { try { // Create generator with invalid model in streaming mode const generator = callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "fake-model-that-does-not-exist", stream: true, skipRetry: true, // Skip retry mechanism to force the error @@ -154,24 +147,18 @@ describe("Error handling integration tests", () => { let finalResponse = ""; // Try to consume generator - may fail during consumption try { - const asyncGenerator = generator as unknown as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as unknown as AsyncGenerator; for await (const chunk of asyncGenerator) { finalResponse = chunk; console.log(`Received chunk: ${chunk}`); } // If we get here, test what happens with JSON parsing - console.log( - `Final response (${finalResponse.length} chars): ${finalResponse}`, - ); + console.log(`Final response (${finalResponse.length} chars): ${finalResponse}`); JSON.parse(finalResponse); // If we reach here, the JSON parsing unexpectedly succeeded - fail("JSON parsing should have failed but succeeded"); + assert.fail("JSON parsing should have failed but succeeded"); } catch (streamError) { // We expect a SyntaxError from JSON.parse if (streamError instanceof SyntaxError) { @@ -191,7 +178,7 @@ describe("Error handling integration tests", () => { // If we want to fail the test when the streaming itself throws (rather than JSON.parse) // we could uncomment this line: - // fail(`Streaming should not throw directly but should return invalid JSON: ${error.message}`); + // assert.fail(`Streaming should not throw directly but should return invalid JSON: ${error.message}`); } else { console.log("Unexpected non-Error object thrown:", error); } @@ -215,7 +202,7 @@ describe("Error handling integration tests", () => { // Create generator with invalid model console.log("Creating generator..."); const generator = callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "fake-model-that-does-not-exist", stream: true, skipRetry: true, @@ -228,11 +215,7 @@ describe("Error handling integration tests", () => { console.log("Generator created, consuming chunks..."); // This mimics React's state updates - const asyncGenerator = generator as unknown as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as unknown as AsyncGenerator; for await (const chunk of asyncGenerator) { responseText = chunk; console.log(`Updated response: ${responseText}`); @@ -248,10 +231,7 @@ describe("Error handling integration tests", () => { if 
(innerError instanceof Error) { console.log("Inner error caught:", innerError.message); } else { - console.log( - "Inner error caught (not an Error):", - String(innerError), - ); + console.log("Inner error caught (not an Error):", String(innerError)); } throw innerError; // Re-throw to outer catch } @@ -264,16 +244,10 @@ describe("Error handling integration tests", () => { if (outerError instanceof SyntaxError) { console.log("Got a SyntaxError - JSON parsing failed"); } else { - console.log( - "Error was not a SyntaxError:", - outerError.constructor.name, - ); + console.log("Error was not a SyntaxError:", outerError.constructor.name); } } else { - console.log( - "Outer error caught (not an Error):", - String(outerError), - ); + console.log("Outer error caught (not an Error):", String(outerError)); errorMessage = String(outerError); } } @@ -311,13 +285,10 @@ describe("Error handling integration tests", () => { debug: true, model: "fake-model-that-does-not-exist", skipRetry: true, - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, }) as unknown as AsyncGenerator; - console.log( - "Generator created, properties:", - Object.getOwnPropertyNames(generator), - ); + console.log("Generator created, properties:", Object.getOwnPropertyNames(generator)); // Delay the iteration slightly to mimic browser async behavior await new Promise((resolve) => setTimeout(resolve, 10)); @@ -345,16 +316,12 @@ describe("Error handling integration tests", () => { responseText = result.value; } else { responseText = result.value; - console.log( - `Received chunk: ${responseText.substring(0, 30)}...`, - ); + console.log(`Received chunk: ${responseText.substring(0, 30)}...`); } } console.log("Step 3: Completed iteration without errors"); - console.log( - `Final response (${responseText.length} chars): ${responseText}`, - ); + console.log(`Final response (${responseText.length} chars): ${responseText}`); // If we get here, try parsing the response try { @@ -371,11 +338,7 @@ describe("Error handling integration tests", () => { } catch (iterError: unknown) { // Properly type the error const error = iterError as Error; - console.log( - "Error during iteration:", - error.constructor.name, - error.message, - ); + console.log("Error during iteration:", error.constructor.name, error.message); errorCaught = true; expect(error.message).toContain("API returned error 400"); } diff --git a/test/errors.integration.test.ts b/test/unit/errors.integration.test.ts similarity index 79% rename from test/errors.integration.test.ts rename to test/unit/errors.integration.test.ts index 88fc4b2..88afdea 100644 --- a/test/errors.integration.test.ts +++ b/test/unit/errors.integration.test.ts @@ -1,11 +1,5 @@ -import { callAi } from "../src/index"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); - -// Configure retry settings for flaky tests -jest.retryTimes(2, { logErrorsBeforeRetry: true }); +import { callAi } from "call-ai"; +import { assert, describe, expect, it, vitest } from "vitest"; // Skip tests if no API key is available const haveApiKey = process.env.CALLAI_API_KEY; @@ -54,23 +48,17 @@ describe("Error handling integration tests", () => { async () => { // Attempt streaming API call with a non-existent model await expect(async () => { - const generator = await callAi( - "Write a short joke about programming.", - { - apiKey: process.env.CALLAI_API_KEY, - model: "fake-model-that-does-not-exist", - stream: true, - skipRetry: true, // Skip retry mechanism to 
force the error - }, - ); + const generator = await callAi("Write a short joke about programming.", { + apiKey: process.env.CALLAI_API_KEY, + model: "fake-model-that-does-not-exist", + stream: true, + skipRetry: true, // Skip retry mechanism to force the error + }); // Try to consume the generator // Cast to AsyncGenerator to ensure TypeScript recognizes it properly - const asyncGenerator = generator as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as AsyncGenerator; + // eslint-disable-next-line @typescript-eslint/no-unused-vars for await (const _ of asyncGenerator) { // This should throw before yielding any chunks } @@ -93,7 +81,7 @@ describe("Error handling integration tests", () => { skipRetry: true, // Skip retry mechanism to force the error }); // If we get here, fail the test - fail("Should have thrown an error"); + assert.fail("Should have thrown an error"); } catch (error) { // Verify error message contains useful information expect(error instanceof Error).toBe(true); @@ -102,7 +90,7 @@ describe("Error handling integration tests", () => { expect(error.message).toContain("HTTP error"); expect(error.message).toContain("400"); // Bad Request status code } else { - fail("Error is not an Error instance"); + assert.fail("Error is not an Error instance"); } } }, @@ -114,7 +102,7 @@ describe("Error handling integration tests", () => { "should handle error with debug option", async () => { // Spy on console.error - const consoleErrorSpy = jest.spyOn(console, "error"); + const consoleErrorSpy = vitest.spyOn(console, "error"); // Attempt API call with a non-existent model and debug enabled try { @@ -125,7 +113,7 @@ describe("Error handling integration tests", () => { skipRetry: true, // Skip retry mechanism to force the error }); // If we get here, fail the test - fail("Should have thrown an error"); + assert.fail("Should have thrown an error"); } catch (error) { // Verify console.error was called with error details expect(consoleErrorSpy).toHaveBeenCalled(); @@ -144,22 +132,15 @@ describe("Error handling integration tests", () => { async () => { try { // Create generator with invalid model in streaming mode - const generator = await callAi( - "Write a short joke about programming.", - { - apiKey: process.env.CALLAI_API_KEY, - model: "fake-model-that-does-not-exist", - stream: true, - skipRetry: true, // Skip retry mechanism to force the error - }, - ); + const generator = await callAi("Write a short joke about programming.", { + apiKey: process.env.CALLAI_API_KEY, + model: "fake-model-that-does-not-exist", + stream: true, + skipRetry: true, // Skip retry mechanism to force the error + }); // Cast to AsyncGenerator to ensure TypeScript recognizes it properly - const asyncGenerator = generator as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as AsyncGenerator; // Try to consume the generator console.log("Attempting to consume streaming response"); @@ -171,13 +152,11 @@ describe("Error handling integration tests", () => { // If we get here (unlikely), try to parse the response as JSON try { - console.log( - `Parsing final response (length: ${finalResponse.length})`, - ); + console.log(`Parsing final response (length: ${finalResponse.length})`); JSON.parse(finalResponse); // If we reach here, the JSON parsing unexpectedly succeeded - fail("JSON parsing should have failed but succeeded"); + assert.fail("JSON parsing should have failed but succeeded"); } catch (streamError) { // We expect a SyntaxError from JSON.parse 
if (streamError instanceof SyntaxError) { @@ -196,9 +175,7 @@ describe("Error handling integration tests", () => { console.log("Outer error message:", error.message); // Check for the error message in the new throw style - expect(error.message).toContain( - "fake-model-that-does-not-exist is not a valid model ID", - ); + expect(error.message).toContain("fake-model-that-does-not-exist is not a valid model ID"); // we could uncomment this line: // fail(`Streaming should not throw directly but should return invalid JSON: ${error.message}`); } else { @@ -228,11 +205,7 @@ describe("Error handling integration tests", () => { }); // Cast to AsyncGenerator - const asyncGenerator = generator as AsyncGenerator< - string, - string, - unknown - >; + const asyncGenerator = generator as AsyncGenerator; console.log("Starting iteration with for-await loop"); // This approach is often used in React components with useEffect @@ -246,9 +219,7 @@ describe("Error handling integration tests", () => { console.log("Error caught during iteration:", error); if (error instanceof Error) { errorMessage = error.message; - expect(error.message).toContain( - "fake-model-that-does-not-exist is not a valid model ID", - ); + expect(error.message).toContain("fake-model-that-does-not-exist is not a valid model ID"); } else { errorMessage = "Unknown error"; } @@ -257,9 +228,7 @@ describe("Error handling integration tests", () => { console.log("Error caught during generator creation:", error); if (error instanceof Error) { errorMessage = error.message; - expect(error.message).toContain( - "fake-model-that-does-not-exist is not a valid model ID", - ); + expect(error.message).toContain("fake-model-that-does-not-exist is not a valid model ID"); } else { errorMessage = "Unknown error"; } @@ -301,10 +270,7 @@ describe("Error handling integration tests", () => { apiKey: process.env.CALLAI_API_KEY, })) as AsyncGenerator; - console.log( - "Generator created, properties:", - Object.getOwnPropertyNames(generator), - ); + console.log("Generator created, properties:", Object.getOwnPropertyNames(generator)); // Delay the iteration slightly to mimic browser async behavior await new Promise((resolve) => setTimeout(resolve, 10)); @@ -332,16 +298,12 @@ describe("Error handling integration tests", () => { responseText = result.value; } else { responseText = result.value; - console.log( - `Received chunk: ${responseText.substring(0, 30)}...`, - ); + console.log(`Received chunk: ${responseText.substring(0, 30)}...`); } } console.log("Step 3: Completed iteration without errors"); - console.log( - `Final response (${responseText.length} chars): ${responseText}`, - ); + console.log(`Final response (${responseText.length} chars): ${responseText}`); // If we get here, try parsing the response try { @@ -358,24 +320,16 @@ describe("Error handling integration tests", () => { } catch (iterError: unknown) { // Properly type the error const error = iterError as Error; - console.log( - "Error during iteration:", - error.constructor.name, - error.message, - ); + console.log("Error during iteration:", error.constructor.name, error.message); errorCaught = true; - expect(error.message).toContain( - "fake-model-that-does-not-exist is not a valid model ID", - ); + expect(error.message).toContain("fake-model-that-does-not-exist is not a valid model ID"); } } catch (outerError: unknown) { // Properly type the error const error = outerError as Error; console.log("Outer error during generator creation:", error.message); errorCaught = true; - 
expect(error.message).toContain( - "fake-model-that-does-not-exist is not a valid model ID", - ); + expect(error.message).toContain("fake-model-that-does-not-exist is not a valid model ID"); } console.log("Final state - responseText:", responseText); diff --git a/test/exports.unit.test.ts b/test/unit/exports.unit.test.ts similarity index 76% rename from test/exports.unit.test.ts rename to test/unit/exports.unit.test.ts index eb2ca86..eb69557 100644 --- a/test/exports.unit.test.ts +++ b/test/unit/exports.unit.test.ts @@ -2,9 +2,12 @@ * Test to verify export aliases for backward compatibility */ +import { describe, it, expect } from "vitest"; +import * as exports from "call-ai"; + describe("Export Aliases", () => { it("should export both callAi and callAI for backward compatibility", () => { - const exports = require("../src/index"); + //const exports = require("../src/index"); // Both export names should exist expect(typeof exports.callAi).toBe("function"); diff --git a/test/gemini-wire.test.ts b/test/unit/gemini-wire.test.ts similarity index 73% rename from test/gemini-wire.test.ts rename to test/unit/gemini-wire.test.ts index 86a4d7f..bff325f 100644 --- a/test/gemini-wire.test.ts +++ b/test/unit/gemini-wire.test.ts @@ -1,42 +1,37 @@ import fs from "fs"; import path from "path"; -import { callAi, Schema, Message } from "../src/index"; +import { callAi, Schema, Message } from "call-ai"; +import { Mock, vitest, expect, describe, it, beforeEach } from "vitest"; // Mock fetch to use our fixture files -global.fetch = jest.fn(); +global.fetch = vitest.fn(); describe("Gemini Wire Protocol Tests", () => { // Read fixtures - const geminiSystemRequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/gemini-system-request.json"), - "utf8", - ), - ); - - const geminiSystemResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/gemini-system-response.json"), - "utf8", - ); - - const geminiRequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/gemini-request.json"), - "utf8", - ), - ); - - const geminiResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/gemini-response.json"), - "utf8", - ); + // const geminiSystemRequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/gemini-system-request.json"), + // "utf8", + // ), + // ); + + const geminiSystemResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/gemini-system-response.json"), "utf8"); + + // const geminiRequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/gemini-request.json"), + // "utf8", + // ), + // ); + + const geminiResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/gemini-response.json"), "utf8"); beforeEach(() => { // Reset mocks - (global.fetch as jest.Mock).mockClear(); + (global.fetch as Mock).mockClear(); // Mock successful response - (global.fetch as jest.Mock).mockImplementation(async (url, options) => { + (global.fetch as Mock).mockImplementation(async () => { return { ok: true, status: 200, @@ -60,30 +55,23 @@ describe("Gemini Wire Protocol Tests", () => { }; // Call the library function with the schema - await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "google/gemini-2.0-flash-001", - schema: schema, - }, - ); + await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "google/gemini-2.0-flash-001", + schema: schema, + }); // Verify fetch was 
called expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Check that we're using JSON Schema format since Gemini is not Claude expect(actualRequestBody.response_format).toBeTruthy(); expect(actualRequestBody.response_format.type).toBe("json_schema"); expect(actualRequestBody.response_format.json_schema).toBeTruthy(); - expect(actualRequestBody.response_format.json_schema.name).toBe( - "book_recommendation", - ); + expect(actualRequestBody.response_format.json_schema.name).toBe("book_recommendation"); // Verify schema structure const schemaObj = actualRequestBody.response_format.json_schema.schema; @@ -98,7 +86,7 @@ describe("Gemini Wire Protocol Tests", () => { it("should correctly handle Gemini response with schema", async () => { // Update mock to return proper response - (global.fetch as jest.Mock).mockImplementationOnce(async (url, options) => { + (global.fetch as Mock).mockImplementationOnce(async () => { return { ok: true, status: 200, @@ -120,18 +108,15 @@ describe("Gemini Wire Protocol Tests", () => { }; // Call the library with Gemini model - const result = await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "google/gemini-2.0-flash-001", - schema: schema, - }, - ); + const result = await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "google/gemini-2.0-flash-001", + schema: schema, + }); // Parse the Gemini response fixture to get expected content - const responseObj = JSON.parse(geminiResponseFixture); - const responseContent = responseObj.choices[0].message.content; + // const responseObj = JSON.parse(geminiResponseFixture); + // const responseContent = responseObj.choices[0].message.content; // Verify the result expect(result).toBeTruthy(); @@ -139,9 +124,7 @@ describe("Gemini Wire Protocol Tests", () => { // Gemini might return content with code blocks if (typeof result === "string") { // Check if the result includes code blocks - const cleanResult = result.includes("```") - ? result.replace(/```json\n|\n```|```\n|\n```/g, "") - : result; + const cleanResult = result.includes("```") ? result.replace(/```json\n|\n```|```\n|\n```/g, "") : result; // Parse the content as JSON and validate const parsed = JSON.parse(cleanResult); @@ -170,8 +153,7 @@ describe("Gemini Wire Protocol Tests", () => { }, { role: "user", - content: - "Give me a short book recommendation. Respond with only valid JSON matching the schema.", + content: "Give me a short book recommendation. Respond with only valid JSON matching the schema.", }, ]; @@ -184,9 +166,7 @@ describe("Gemini Wire Protocol Tests", () => { expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Verify messages are passed through correctly expect(actualRequestBody.messages).toEqual(messages); @@ -203,8 +183,7 @@ describe("Gemini Wire Protocol Tests", () => { }, { role: "user", - content: - "Give me a short book recommendation. Respond with only valid JSON matching the schema.", + content: "Give me a short book recommendation. 
Respond with only valid JSON matching the schema.", }, ] as Message[], { @@ -218,13 +197,8 @@ describe("Gemini Wire Protocol Tests", () => { if (typeof result === "string") { // Handle possible markdown code blocks in the response - const jsonMatch = (result as string).match( - /```json\s*([\s\S]*?)\s*```/, - ) || - (result as string).match(/```\s*([\s\S]*?)\s*```/) || [ - null, - result as string, - ]; + const jsonMatch = (result as string).match(/```json\s*([\s\S]*?)\s*```/) || + (result as string).match(/```\s*([\s\S]*?)\s*```/) || [null, result as string]; const jsonContent = jsonMatch[1] || (result as string); @@ -247,7 +221,7 @@ describe("Gemini Wire Protocol Tests", () => { it("should handle schema when response_format schema is supported", async () => { // Override the mock for this specific test - (global.fetch as jest.Mock).mockImplementationOnce(async (url, options) => { + (global.fetch as Mock).mockImplementationOnce(async () => { return { ok: true, status: 200, @@ -269,30 +243,23 @@ describe("Gemini Wire Protocol Tests", () => { }; // Call the library function with schema format set to true to test fallback - await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "google/gemini-2.0-flash-001", - schema: schema, - }, - ); + await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "google/gemini-2.0-flash-001", + schema: schema, + }); // Verify fetch was called expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Check that we're using response_format.json_schema approach instead expect(actualRequestBody.response_format).toBeTruthy(); expect(actualRequestBody.response_format.type).toBe("json_schema"); expect(actualRequestBody.response_format.json_schema).toBeTruthy(); - expect(actualRequestBody.response_format.json_schema.name).toBe( - "book_recommendation", - ); + expect(actualRequestBody.response_format.json_schema.name).toBe("book_recommendation"); // Verify schema structure const schemaObj = actualRequestBody.response_format.json_schema.schema; diff --git a/test/gpt4turbo-wire.test.ts b/test/unit/gpt4turbo-wire.test.ts similarity index 70% rename from test/gpt4turbo-wire.test.ts rename to test/unit/gpt4turbo-wire.test.ts index 32d773b..4f2b0b5 100644 --- a/test/gpt4turbo-wire.test.ts +++ b/test/unit/gpt4turbo-wire.test.ts @@ -1,30 +1,28 @@ import fs from "fs"; import path from "path"; -import { callAi, Schema, Message } from "../src/index"; +import { callAi, Schema, Message } from "call-ai"; +import { Mock, vitest, expect, describe, it, beforeEach } from "vitest"; // Mock fetch to use our fixture files -global.fetch = jest.fn(); +global.fetch = vitest.fn(); describe("GPT-4 Turbo Wire Protocol Tests", () => { // Read fixtures - const gpt4turboSystemRequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/gpt4turbo-system-request.json"), - "utf8", - ), - ); - - const gpt4turboSystemResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/gpt4turbo-system-response.json"), - "utf8", - ); + // const gpt4turboSystemRequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/gpt4turbo-system-request.json"), + // "utf8", + // ), + // ); + + const gpt4turboSystemResponseFixture = 
fs.readFileSync(path.join(__dirname, "fixtures/gpt4turbo-system-response.json"), "utf8"); beforeEach(() => { // Reset mocks - (global.fetch as jest.Mock).mockClear(); + (global.fetch as Mock).mockClear(); // Mock successful response - (global.fetch as jest.Mock).mockImplementation(async (url, options) => { + (global.fetch as Mock).mockImplementation(async () => { return { ok: true, status: 200, @@ -48,32 +46,25 @@ describe("GPT-4 Turbo Wire Protocol Tests", () => { }; // Call the library function with the schema using system message approach - await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "openai/gpt-4-turbo", - schema: schema, - forceSystemMessage: true, - }, - ); + await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "openai/gpt-4-turbo", + schema: schema, + forceSystemMessage: true, + }); // Verify fetch was called expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Check that we're using system messages expect(actualRequestBody.messages).toBeTruthy(); expect(actualRequestBody.messages.length).toBeGreaterThanOrEqual(1); // Find the system message - const systemMessage = actualRequestBody.messages.find( - (m: any) => m.role === "system", - ); + const systemMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "system"); expect(systemMessage).toBeTruthy(); expect(systemMessage.content).toContain("title"); expect(systemMessage.content).toContain("author"); @@ -81,13 +72,9 @@ describe("GPT-4 Turbo Wire Protocol Tests", () => { expect(systemMessage.content).toContain("rating"); // Verify user message is included - const userMessage = actualRequestBody.messages.find( - (m: any) => m.role === "user", - ); + const userMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "user"); expect(userMessage).toBeTruthy(); - expect(userMessage.content).toBe( - "Give me a short book recommendation in the requested format.", - ); + expect(userMessage.content).toBe("Give me a short book recommendation in the requested format."); }); it("should correctly handle GPT-4 Turbo response with system message", async () => { @@ -101,8 +88,7 @@ describe("GPT-4 Turbo Wire Protocol Tests", () => { }, { role: "user", - content: - "Give me a short book recommendation. Respond with only valid JSON matching the schema.", + content: "Give me a short book recommendation. 
Respond with only valid JSON matching the schema.", }, ] as Message[], { @@ -144,14 +130,11 @@ describe("GPT-4 Turbo Wire Protocol Tests", () => { }; // Call the library function with the schema - const result = await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "openai/gpt-4-turbo", - schema: schema, - }, - ); + const result = await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "openai/gpt-4-turbo", + schema: schema, + }); // Verify the result expect(result).toBeTruthy(); @@ -187,34 +170,25 @@ describe("GPT-4 Turbo Wire Protocol Tests", () => { }; // Call the library function with the schema - await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "openai/gpt-4-turbo", - schema: schema, - }, - ); + await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "openai/gpt-4-turbo", + schema: schema, + }); // Verify fetch was called expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Check that we're sending messages expect(actualRequestBody.messages).toBeTruthy(); expect(actualRequestBody.messages.length).toBeGreaterThan(0); // Verify user message is included - const userMessage = actualRequestBody.messages.find( - (m: any) => m.role === "user", - ); + const userMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "user"); expect(userMessage).toBeTruthy(); - expect(userMessage.content).toBe( - "Give me a short book recommendation in the requested format.", - ); + expect(userMessage.content).toBe("Give me a short book recommendation in the requested format."); }); }); diff --git a/test/image.integration.no-await.test.ts b/test/unit/image.integration.no-await.test.ts similarity index 79% rename from test/image.integration.no-await.test.ts rename to test/unit/image.integration.no-await.test.ts index 9699ad6..464935b 100644 --- a/test/image.integration.no-await.test.ts +++ b/test/unit/image.integration.no-await.test.ts @@ -1,12 +1,9 @@ -import { callAi } from "../src/index"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); +import { callAi, callAiEnv, Message } from "call-ai"; +import { itif } from "../test-helper.js"; +import { describe, it, expect } from "vitest"; // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; -const itif = (condition: boolean) => (condition ? 
it : it.skip); +const haveApiKey = callAiEnv.CALLAI_API_KEY; // Timeout for image generation tests const TIMEOUT = 20000; @@ -25,12 +22,8 @@ describe("Vision Model Tests", () => { itif(Boolean(haveApiKey))( "should use a vision model to describe an image", async () => { - console.log( - "Note: This test is just to verify vision model integration using description", - ); - console.log( - "Image generation requires direct OpenAI API access with DALL-E models", - ); + console.log("Note: This test is just to verify vision model integration using description"); + console.log("Image generation requires direct OpenAI API access with DALL-E models"); // Create a simple message const messages: Message[] = [ @@ -43,7 +36,7 @@ describe("Vision Model Tests", () => { try { // Call the API with a vision model (OpenRouter supports these) const response = await callAi(messages, { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "meta-llama/llama-3.2-11b-vision", // Vision-capable model modalities: ["text"], }); @@ -68,6 +61,7 @@ describe("Vision Model Tests", () => { console.log("Received a valid response from the vision model"); expect(parsed).toBeTruthy(); } + // eslint-disable-next-line @typescript-eslint/no-unused-vars } catch (e) { // If not valid JSON, it might be a direct text response console.log("Direct text response:", response.substring(0, 150)); @@ -80,9 +74,7 @@ describe("Vision Model Tests", () => { } catch (error) { console.error("Test failed with exception:", error); // Don't fail the test immediately as we're exploring compatibility - console.log( - "Note: This test may fail if the specific vision model isn't available through your API key", - ); + console.log("Note: This test may fail if the specific vision model isn't available through your API key"); expect(error).toBeDefined(); // Simple assertion to avoid test failure } }, @@ -95,9 +87,7 @@ describe("Vision Model Tests", () => { console.log("1. Use the OpenAI API directly (not OpenRouter)"); console.log("2. Use the images/generations endpoint"); console.log("3. The model ID should be 'dall-e-3'"); - console.log( - "4. Documentation: https://platform.openai.com/docs/api-reference/images", - ); + console.log("4. Documentation: https://platform.openai.com/docs/api-reference/images"); // A passing test that just provides information expect(true).toBe(true); diff --git a/test/image.integration.test.ts b/test/unit/image.integration.test.ts similarity index 81% rename from test/image.integration.test.ts rename to test/unit/image.integration.test.ts index 9699ad6..8795924 100644 --- a/test/image.integration.test.ts +++ b/test/unit/image.integration.test.ts @@ -1,11 +1,8 @@ -import { callAi } from "../src/index"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); +import { callAi, callAiEnv, Message } from "call-ai"; +import { it, describe, expect } from "vitest"; // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; +const haveApiKey = callAiEnv.CALLAI_API_KEY; const itif = (condition: boolean) => (condition ? 
it : it.skip); // Timeout for image generation tests @@ -25,12 +22,8 @@ describe("Vision Model Tests", () => { itif(Boolean(haveApiKey))( "should use a vision model to describe an image", async () => { - console.log( - "Note: This test is just to verify vision model integration using description", - ); - console.log( - "Image generation requires direct OpenAI API access with DALL-E models", - ); + console.log("Note: This test is just to verify vision model integration using description"); + console.log("Image generation requires direct OpenAI API access with DALL-E models"); // Create a simple message const messages: Message[] = [ @@ -43,7 +36,7 @@ describe("Vision Model Tests", () => { try { // Call the API with a vision model (OpenRouter supports these) const response = await callAi(messages, { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: "meta-llama/llama-3.2-11b-vision", // Vision-capable model modalities: ["text"], }); @@ -68,6 +61,7 @@ describe("Vision Model Tests", () => { console.log("Received a valid response from the vision model"); expect(parsed).toBeTruthy(); } + // eslint-disable-next-line @typescript-eslint/no-unused-vars } catch (e) { // If not valid JSON, it might be a direct text response console.log("Direct text response:", response.substring(0, 150)); @@ -80,9 +74,7 @@ describe("Vision Model Tests", () => { } catch (error) { console.error("Test failed with exception:", error); // Don't fail the test immediately as we're exploring compatibility - console.log( - "Note: This test may fail if the specific vision model isn't available through your API key", - ); + console.log("Note: This test may fail if the specific vision model isn't available through your API key"); expect(error).toBeDefined(); // Simple assertion to avoid test failure } }, @@ -95,9 +87,7 @@ describe("Vision Model Tests", () => { console.log("1. Use the OpenAI API directly (not OpenRouter)"); console.log("2. Use the images/generations endpoint"); console.log("3. The model ID should be 'dall-e-3'"); - console.log( - "4. Documentation: https://platform.openai.com/docs/api-reference/images", - ); + console.log("4. 
Documentation: https://platform.openai.com/docs/api-reference/images"); // A passing test that just provides information expect(true).toBe(true); diff --git a/test/imagegen.integration.test.ts b/test/unit/imagegen.integration.test.ts similarity index 72% rename from test/imagegen.integration.test.ts rename to test/unit/imagegen.integration.test.ts index 5796176..d8daa29 100644 --- a/test/imagegen.integration.test.ts +++ b/test/unit/imagegen.integration.test.ts @@ -1,26 +1,18 @@ -import { imageGen } from "../src/index"; -import dotenv from "dotenv"; -// Import jest fetch mock -import "jest-fetch-mock"; +/// +import { imageGen } from "call-ai"; +import { describe, it, expect, vitest, beforeEach, Mock } from "vitest"; // Add type declaration for Node.js require -// @ts-ignore - using require for jest-fetch-mock -const fetchMock = require("jest-fetch-mock"); // Configure fetch mock -global.fetch = fetchMock; -fetchMock.enableMocks(); - -// Load environment variables from .env file if present -dotenv.config(); +global.fetch = vitest.fn(); // Mock response for image generation const mockImageResponse = { created: Date.now(), data: [ { - b64_json: - "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", // 1x1 px transparent PNG + b64_json: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", // 1x1 px transparent PNG revised_prompt: "Generated image based on prompt", }, ], @@ -29,14 +21,16 @@ const mockImageResponse = { describe("Image Generation Integration Tests", () => { beforeEach(() => { // Reset fetch mocks before each test - fetchMock.resetMocks(); + (global.fetch as Mock).mockClear(); }); it("should generate an image with a text prompt", async () => { // Set up fetch mock for image generation - fetchMock.mockResponseOnce(JSON.stringify(mockImageResponse), { + (global.fetch as Mock).mockResolvedValueOnce({ + json: async () => mockImageResponse, + ok: true, status: 200, - headers: { "Content-Type": "application/json" }, + headers: new Headers({ "Content-Type": "application/json" }), }); // Generate test prompt @@ -49,9 +43,9 @@ describe("Image Generation Integration Tests", () => { debug: true, }); + console.log("Image editing test result:", result); // Verify the structure of the response expect(result).toBeDefined(); - expect(result.created).toBeDefined(); expect(Array.isArray(result.data)).toBe(true); expect(result.data.length).toBeGreaterThan(0); expect(result.data[0].b64_json).toBeDefined(); @@ -62,8 +56,8 @@ describe("Image Generation Integration Tests", () => { expect(imageBase64.length).toBeGreaterThan(0); // Verify the request was made correctly - expect(fetchMock).toHaveBeenCalledTimes(1); - expect(fetchMock).toHaveBeenCalledWith( + expect(fetch).toHaveBeenCalledTimes(1); + expect(fetch).toHaveBeenCalledWith( expect.stringMatching(/.*\/api\/openai-image\/generate$/), expect.objectContaining({ method: "POST", @@ -76,8 +70,8 @@ describe("Image Generation Integration Tests", () => { ); // Verify request body content - const mockCall = fetchMock.mock.calls[0]; - const requestBody = JSON.parse(mockCall[1].body as string); + const mockCall = (global.fetch as Mock).mock.calls[0] as [unknown, { body: string }]; + const requestBody = JSON.parse(mockCall[1].body); expect(requestBody.prompt).toBe(testPrompt); expect(requestBody.model).toBe("gpt-image-1"); @@ -86,13 +80,14 @@ describe("Image Generation Integration Tests", () => { it("should handle image editing with multiple input images", async () => 
{ // Set up fetch mock for image editing - fetchMock.mockResponseOnce(JSON.stringify(mockImageResponse), { + (global.fetch as Mock).mockResolvedValueOnce({ + json: async () => mockImageResponse, status: 200, + ok: true, headers: { "Content-Type": "application/json" }, }); - const testPrompt = - "Create a lovely gift basket with these four items in it"; + const testPrompt = "Create a lovely gift basket with these four items in it"; // Mock implementation for File objects const mockImageBlob = new Blob(["fake image data"], { type: "image/png" }); @@ -110,14 +105,14 @@ describe("Image Generation Integration Tests", () => { // Verify the structure of the response expect(result).toBeDefined(); - expect(result.created).toBeDefined(); + expect(result.created).toBeGreaterThan(0); expect(Array.isArray(result.data)).toBe(true); expect(result.data.length).toBeGreaterThan(0); expect(result.data[0].b64_json).toBeDefined(); // Verify the request was made correctly - expect(fetchMock).toHaveBeenCalledTimes(1); - expect(fetchMock).toHaveBeenCalledWith( + expect(fetch).toHaveBeenCalledTimes(1); + expect(fetch).toHaveBeenCalledWith( expect.stringMatching(/.*\/api\/openai-image\/edit$/), expect.objectContaining({ method: "POST", diff --git a/test/imagegen.test.js b/test/unit/imagegen.test.ts similarity index 55% rename from test/imagegen.test.js rename to test/unit/imagegen.test.ts index 41b3295..26fdd7f 100644 --- a/test/imagegen.test.js +++ b/test/unit/imagegen.test.ts @@ -4,56 +4,57 @@ */ // Import the function directly from the module -const { imageGen } = require('../dist/api'); -const assert = require('assert'); +import { vitest, describe, test, expect, beforeEach } from "vitest"; +import { imageGen } from "call-ai"; // Mock fetch for testing -global.fetch = jest.fn(() => +global.fetch = vitest.fn(() => Promise.resolve({ ok: true, status: 200, statusText: "OK", - json: () => Promise.resolve({ - created: Date.now(), - data: [ - { - b64_json: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", // 1x1 px transparent PNG - revised_prompt: "Generated image based on prompt" - } - ] - }) - }) + json: () => + Promise.resolve({ + created: Date.now(), + data: [ + { + b64_json: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", // 1x1 px transparent PNG + revised_prompt: "Generated image based on prompt", + }, + ], + }), + } as Response), ); -describe('imageGen function', () => { +describe("imageGen function", () => { beforeEach(() => { - jest.clearAllMocks(); + vitest.clearAllMocks(); }); // Simple test to verify basic functionality - test('should make a POST request to generate an image', async () => { + test("should make a POST request to generate an image", async () => { const prompt = "A children's book drawing of a veterinarian"; - + try { // Call the imageGen function const result = await imageGen(prompt, { - apiKey: 'VIBES_DIY', - model: 'gpt-image-1' + apiKey: "VIBES_DIY", + model: "gpt-image-1", }); - + // Verify the fetch call was made correctly expect(fetch).toHaveBeenCalledTimes(1); expect(fetch).toHaveBeenCalledWith( - '/api/openai-image/generate', + "/api/openai-image/generate", expect.objectContaining({ - method: 'POST', + method: "POST", headers: expect.objectContaining({ - 'Authorization': 'Bearer VIBES_DIY', - 'Content-Type': 'application/json' - }) - }) + Authorization: "Bearer VIBES_DIY", + "Content-Type": "application/json", + }), + }), ); - + // Verify the result structure 
expect(result).toBeDefined(); expect(result.data).toBeInstanceOf(Array); @@ -61,48 +62,48 @@ describe('imageGen function', () => { expect(result.data[0].b64_json).toBeDefined(); } catch (error) { // Log in case of error to help with debugging - console.error('Test failed:', error); + console.error("Test failed:", error); throw error; } }); - + // Test for image editing with multiple images - test('should make a POST request for image editing', async () => { + test("should make a POST request for image editing", async () => { const prompt = "Create a lovely gift basket with these items"; - + // Mock File objects - const mockImageBlob = new Blob(['fake image data'], { type: 'image/png' }); + const mockImageBlob = new Blob(["fake image data"], { type: "image/png" }); const mockFiles = [ - new File([mockImageBlob], 'image1.png', { type: 'image/png' }), - new File([mockImageBlob], 'image2.png', { type: 'image/png' }) + new File([mockImageBlob], "image1.png", { type: "image/png" }), + new File([mockImageBlob], "image2.png", { type: "image/png" }), ]; - + try { const result = await imageGen(prompt, { - apiKey: 'VIBES_DIY', - model: 'gpt-image-1', - images: mockFiles + apiKey: "VIBES_DIY", + model: "gpt-image-1", + images: mockFiles, }); - + // Verify the fetch call was made correctly expect(fetch).toHaveBeenCalledTimes(1); expect(fetch).toHaveBeenCalledWith( - '/api/openai-image/edit', + "/api/openai-image/edit", expect.objectContaining({ - method: 'POST', + method: "POST", headers: expect.objectContaining({ - 'Authorization': 'Bearer VIBES_DIY' - }) - }) + Authorization: "Bearer VIBES_DIY", + }), + }), ); - + // Verify the result structure expect(result).toBeDefined(); expect(result.data).toBeInstanceOf(Array); expect(result.data.length).toBeGreaterThan(0); expect(result.data[0].b64_json).toBeDefined(); } catch (error) { - console.error('Test failed:', error); + console.error("Test failed:", error); throw error; } }); diff --git a/test/imagegen.unit.test.ts b/test/unit/imagegen.unit.test.ts similarity index 68% rename from test/imagegen.unit.test.ts rename to test/unit/imagegen.unit.test.ts index f7a2417..bba96ec 100644 --- a/test/imagegen.unit.test.ts +++ b/test/unit/imagegen.unit.test.ts @@ -1,23 +1,24 @@ -import { imageGen } from "../src/index"; +import { Mock, vitest, describe, it, expect, beforeEach, beforeAll, assert } from "vitest"; +import { imageGen } from "call-ai"; // Mock fetch -global.fetch = jest.fn(); +global.fetch = vitest.fn(); // Create mock objects in setup to avoid TypeScript errors -let mockBlobInstance: any; -let mockFileInstance: any; +// let mockBlobInstance: any; +// let mockFileInstance: any; // Setup mock constructors and instances beforeAll(() => { // Create mock instances that will be returned when 'new' is called - mockBlobInstance = { + const mockBlobInstance = { size: 0, type: "image/png", - arrayBuffer: jest.fn().mockResolvedValue(new ArrayBuffer(0)), - text: jest.fn().mockResolvedValue("mock text"), + arrayBuffer: vitest.fn().mockResolvedValue(new ArrayBuffer(0)), + text: vitest.fn().mockResolvedValue("mock text"), }; - mockFileInstance = { + const mockFileInstance = { name: "mock-file.png", type: "image/png", size: 0, @@ -26,22 +27,22 @@ beforeAll(() => { // Use a simple class implementation that Jest's objectContaining can properly match class MockFormData { - append = jest.fn(); - delete = jest.fn(); - get = jest.fn(); - getAll = jest.fn(); - has = jest.fn(); - set = jest.fn(); + append = vitest.fn(); + delete = vitest.fn(); + get = vitest.fn(); + getAll = 
vitest.fn(); + has = vitest.fn(); + set = vitest.fn(); } // Mock constructors - global.Blob = jest.fn().mockImplementation(() => mockBlobInstance) as any; - global.File = jest.fn().mockImplementation((_, name, options) => { + global.Blob = vitest.fn().mockImplementation(() => mockBlobInstance); //as any; + global.File = vitest.fn().mockImplementation((_, name, options) => { return { ...mockFileInstance, name, type: options?.type || "image/png" }; - }) as any; + }); //as any; // For FormData, create a new instance each time - global.FormData = MockFormData as any; + global.FormData = MockFormData as unknown as typeof FormData; }); // Mock response for successful image generation @@ -49,8 +50,7 @@ const mockImageResponse = { created: Date.now(), data: [ { - b64_json: - "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", // 1x1 px transparent PNG + b64_json: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==", // 1x1 px transparent PNG revised_prompt: "Generated image based on prompt", }, ], @@ -58,18 +58,17 @@ const mockImageResponse = { describe("imageGen", () => { beforeEach(() => { - jest.clearAllMocks(); - (global.fetch as jest.Mock).mockResolvedValue({ + vitest.clearAllMocks(); + (global.fetch as Mock).mockResolvedValue({ ok: true, status: 200, statusText: "OK", - json: jest.fn().mockResolvedValue(mockImageResponse), + json: vitest.fn().mockResolvedValue(mockImageResponse), }); }); it("should make POST request with correct parameters for image generation", async () => { - const prompt = - "A children's book drawing of a veterinarian using a stethoscope to listen to the heartbeat of a baby otter."; + const prompt = "A children's book drawing of a veterinarian using a stethoscope to listen to the heartbeat of a baby otter."; const options = { apiKey: "VIBES_DIY", model: "gpt-image-1", @@ -92,13 +91,10 @@ describe("imageGen", () => { ); // Check request body - const requestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const requestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); expect(requestBody).toEqual({ model: "gpt-image-1", - prompt: - "A children's book drawing of a veterinarian using a stethoscope to listen to the heartbeat of a baby otter.", + prompt: "A children's book drawing of a veterinarian using a stethoscope to listen to the heartbeat of a baby otter.", size: "1024x1024", }); @@ -147,20 +143,18 @@ describe("imageGen", () => { it("should handle errors from the image generation API", async () => { // Mock a failed response - (global.fetch as jest.Mock).mockResolvedValue({ + (global.fetch as Mock).mockResolvedValue({ ok: false, status: 400, statusText: "Bad Request", - text: jest - .fn() - .mockResolvedValue(JSON.stringify({ error: "Invalid prompt" })), + text: vitest.fn().mockResolvedValue(JSON.stringify({ error: "Invalid prompt" })), }); const prompt = "This prompt will cause an error"; try { await imageGen(prompt, { apiKey: "VIBES_DIY" }); - fail("Expected the image generation to throw an error"); + assert.fail("Expected the image generation to throw an error"); } catch (error) { expect((error as Error).message).toContain("Image generation failed"); expect((error as Error).message).toContain("400 Bad Request"); @@ -169,27 +163,23 @@ describe("imageGen", () => { it("should handle errors from the image editing API", async () => { // Mock a failed response - (global.fetch as jest.Mock).mockResolvedValue({ + (global.fetch as 
Mock).mockResolvedValue({ ok: false, status: 400, statusText: "Bad Request", - text: jest - .fn() - .mockResolvedValue(JSON.stringify({ error: "Invalid image format" })), + text: vitest.fn().mockResolvedValue(JSON.stringify({ error: "Invalid image format" })), }); const prompt = "This will trigger an error"; const mockImageBlob = new Blob(["fake image data"], { type: "image/png" }); - const mockFiles = [ - new File([mockImageBlob], "invalid.png", { type: "image/png" }), - ]; + const mockFiles = [new File([mockImageBlob], "invalid.png", { type: "image/png" })]; try { await imageGen(prompt, { apiKey: "VIBES_DIY", images: mockFiles, }); - fail("Expected the image editing to throw an error"); + assert.fail("Expected the image editing to throw an error"); } catch (error) { expect((error as Error).message).toContain("Image editing failed"); expect((error as Error).message).toContain("400 Bad Request"); diff --git a/test/llama3-wire.test.ts b/test/unit/llama3-wire.test.ts similarity index 69% rename from test/llama3-wire.test.ts rename to test/unit/llama3-wire.test.ts index 0a98b4b..134fea5 100644 --- a/test/llama3-wire.test.ts +++ b/test/unit/llama3-wire.test.ts @@ -1,42 +1,38 @@ import fs from "fs"; import path from "path"; -import { callAi, Schema, Message } from "../src/index"; +import { callAi, Schema, Message } from "call-ai"; +import { describe, beforeEach, Mock, it, expect, vitest } from "vitest"; +// import { vitest } from "vitest"; // Mock fetch to use our fixture files -global.fetch = jest.fn(); +global.fetch = vitest.fn(); describe("Llama3 Wire Protocol Tests", () => { // Read fixtures - const llama3RequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/llama3-request.json"), - "utf8", - ), - ); - - const llama3ResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/llama3-response.json"), - "utf8", - ); - - const llama3SystemRequestFixture = JSON.parse( - fs.readFileSync( - path.join(__dirname, "fixtures/llama3-system-request.json"), - "utf8", - ), - ); - - const llama3SystemResponseFixture = fs.readFileSync( - path.join(__dirname, "fixtures/llama3-system-response.json"), - "utf8", - ); + // const llama3RequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/llama3-request.json"), + // "utf8", + // ), + // ); + + const llama3ResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/llama3-response.json"), "utf8"); + + // const llama3SystemRequestFixture = JSON.parse( + // fs.readFileSync( + // path.join(__dirname, "fixtures/llama3-system-request.json"), + // "utf8", + // ), + // ); + + const llama3SystemResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/llama3-system-response.json"), "utf8"); beforeEach(() => { // Reset mocks - (global.fetch as jest.Mock).mockClear(); + (global.fetch as Mock).mockClear(); // Mock successful response - (global.fetch as jest.Mock).mockImplementation(async (url, options) => { + (global.fetch as Mock).mockImplementation(async () => { return { ok: true, status: 200, @@ -60,31 +56,24 @@ describe("Llama3 Wire Protocol Tests", () => { }; // Call the library function with the schema - await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "meta-llama/llama-3.3-70b-instruct", - schema: schema, - }, - ); + await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "meta-llama/llama-3.3-70b-instruct", + schema: schema, + }); // Verify fetch was called 
expect(global.fetch).toHaveBeenCalled(); // Get the request body that was passed to fetch - const actualRequestBody = JSON.parse( - (global.fetch as jest.Mock).mock.calls[0][1].body, - ); + const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); // Check that we're using system message approach rather than JSON schema format expect(actualRequestBody.messages).toBeTruthy(); expect(actualRequestBody.messages.length).toBeGreaterThan(1); // Check for system message with schema info - const systemMessage = actualRequestBody.messages.find( - (m: any) => m.role === "system", - ); + const systemMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "system"); expect(systemMessage).toBeTruthy(); expect(systemMessage.content).toContain("title"); expect(systemMessage.content).toContain("author"); @@ -93,13 +82,9 @@ describe("Llama3 Wire Protocol Tests", () => { expect(systemMessage.content).toContain("rating"); // Verify user message is included - const userMessage = actualRequestBody.messages.find( - (m: any) => m.role === "user", - ); + const userMessage = actualRequestBody.messages.find((m: { role: string }) => m.role === "user"); expect(userMessage).toBeTruthy(); - expect(userMessage.content).toBe( - "Give me a short book recommendation in the requested format.", - ); + expect(userMessage.content).toBe("Give me a short book recommendation in the requested format."); // Verify response_format is not used expect(actualRequestBody.response_format).toBeUndefined(); @@ -107,7 +92,7 @@ describe("Llama3 Wire Protocol Tests", () => { it("should correctly handle Llama3 response with schema", async () => { // Update mock to return proper response - (global.fetch as jest.Mock).mockImplementationOnce(async (url, options) => { + (global.fetch as Mock).mockImplementationOnce(async () => { return { ok: true, status: 200, @@ -129,18 +114,15 @@ describe("Llama3 Wire Protocol Tests", () => { }; // Call the library with Llama3 model - const result = await callAi( - "Give me a short book recommendation in the requested format.", - { - apiKey: "test-api-key", - model: "meta-llama/llama-3.3-70b-instruct", - schema: schema, - }, - ); + const result = await callAi("Give me a short book recommendation in the requested format.", { + apiKey: "test-api-key", + model: "meta-llama/llama-3.3-70b-instruct", + schema: schema, + }); // Parse the Llama3 response fixture to get expected content - const responseObj = JSON.parse(llama3ResponseFixture); - const responseContent = responseObj.choices[0].message.content; + // const responseObj = JSON.parse(llama3ResponseFixture); + // const responseContent = responseObj.choices[0].message.content; // Verify the result expect(result).toBeTruthy(); @@ -156,7 +138,7 @@ describe("Llama3 Wire Protocol Tests", () => { it("should handle system message approach with Llama3", async () => { // Update mock to return system message response - (global.fetch as jest.Mock).mockImplementationOnce(async (url, options) => { + (global.fetch as Mock).mockImplementationOnce(async () => { return { ok: true, status: 200, @@ -175,8 +157,7 @@ describe("Llama3 Wire Protocol Tests", () => { }, { role: "user", - content: - "Give me a short book recommendation. Respond with only valid JSON matching the schema.", + content: "Give me a short book recommendation. 
Respond with only valid JSON matching the schema.", }, ] as Message[], { diff --git a/test/metadata.test.ts b/test/unit/metadata.test.ts similarity index 83% rename from test/metadata.test.ts rename to test/unit/metadata.test.ts index 80fbb85..f748550 100644 --- a/test/metadata.test.ts +++ b/test/unit/metadata.test.ts @@ -1,12 +1,12 @@ -import { callAi, getMeta } from "../src/index"; -import { ResponseMeta } from "../src/types"; +import { beforeEach, describe, expect, it, Mock, vitest } from "vitest"; +import { callAi, getMeta, ModelId, ResponseMeta } from "call-ai"; // Mock global fetch -global.fetch = jest.fn(); +global.fetch = vitest.fn(); // Simple mock for TextDecoder -global.TextDecoder = jest.fn().mockImplementation(() => ({ - decode: jest.fn((value) => { +global.TextDecoder = vitest.fn().mockImplementation(() => ({ + decode: vitest.fn((value) => { // Basic mock implementation without recursion if (value instanceof Uint8Array) { // Convert the Uint8Array to a simple string @@ -20,35 +20,35 @@ global.TextDecoder = jest.fn().mockImplementation(() => ({ // Mock ReadableStream const mockReader = { - read: jest.fn(), + read: vitest.fn(), }; // Create a mock response with headers const mockResponse = { - json: jest.fn(), - text: jest.fn(), + json: vitest.fn(), + text: vitest.fn(), body: { - getReader: jest.fn().mockReturnValue(mockReader), + getReader: vitest.fn().mockReturnValue(mockReader), }, ok: true, status: 200, statusText: "OK", headers: { - get: jest.fn((name) => { + get: vitest.fn((name) => { if (name === "content-type") return "application/json"; return null; - }) as jest.Mock, - forEach: jest.fn(), + }) as Mock, + forEach: vitest.fn(), }, - clone: jest.fn(function () { + clone: vitest.fn(function (this: typeof mockResponse) { return { ...this }; }), }; describe("getMeta", () => { beforeEach(() => { - jest.clearAllMocks(); - (global.fetch as jest.Mock).mockResolvedValue(mockResponse); + vitest.clearAllMocks(); + (global.fetch as Mock).mockResolvedValue(mockResponse); }); it("should return metadata for non-streaming responses", async () => { @@ -80,7 +80,7 @@ describe("getMeta", () => { // Verify raw response data expect(meta?.rawResponse).toBeDefined(); - expect(meta?.rawResponse.model).toBe("openai/gpt-4o"); + expect((meta?.rawResponse as ModelId).model).toBe("openai/gpt-4o"); // Verify timing information expect(meta?.timing).toBeDefined(); @@ -94,11 +94,7 @@ describe("getMeta", () => { // the streaming response would look like, and test that getMeta() works with it. 
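// (Illustrative sketch only, not call-ai library code: getMeta() can resolve metadata
// for a bare string or AsyncGenerator presumably because the library keys metadata off
// the response object itself, e.g. in a WeakMap populated when the response is created.
// The names metaStore/getMetaSketch are hypothetical; the Map-based testMap mock below
// imitates the same lookup pattern:)
//
//   const metaStore = new WeakMap<object, ResponseMeta>();
//   const getMetaSketch = (response: object): ResponseMeta | undefined =>
//     metaStore.get(response);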
// Create a simple AsyncGenerator to simulate streaming response - async function* mockStreamResponse(): AsyncGenerator< - string, - string, - unknown - > { + async function* mockStreamResponse(): AsyncGenerator { yield "Hello"; yield " world"; return "Hello world"; @@ -126,8 +122,8 @@ describe("getMeta", () => { testMap.set(generator, mockMeta); // Mock the getMeta function for this test to use our test map - const originalGetMeta = getMeta; - const mockedGetMeta = jest.fn((resp) => testMap.get(resp)); + // const originalGetMeta = getMeta; + const mockedGetMeta = vitest.fn((resp) => testMap.get(resp)); // Check that we can get metadata from our mocked streaming response const meta = mockedGetMeta(generator); diff --git a/test/no-await.integration.test.ts b/test/unit/no-await.integration.test.ts similarity index 59% rename from test/no-await.integration.test.ts rename to test/unit/no-await.integration.test.ts index d00867a..88ec29a 100644 --- a/test/no-await.integration.test.ts +++ b/test/unit/no-await.integration.test.ts @@ -1,17 +1,14 @@ -import { callAi } from "../src/index"; -import dotenv from "dotenv"; - -// Load environment variables from .env file if present -dotenv.config(); +import { callAi, callAiEnv } from "call-ai"; +import { expectOrWarn } from "../test-helper.js"; +import { describe, expect, it } from "vitest"; // Configure retry settings for flaky tests - use fewer retries with faster failures -jest.retryTimes(2, { logErrorsBeforeRetry: true }); // Increase Jest's default timeout to handle all parallel requests // jest.setTimeout(60000); // Skip tests if no API key is available -const haveApiKey = process.env.CALLAI_API_KEY; +const haveApiKey = callAiEnv.CALLAI_API_KEY; const itif = (condition: boolean) => (condition ? it.concurrent : it.skip); // Timeout for individual test @@ -30,19 +27,6 @@ const supportedModels = { // Define the model names as an array for looping const modelEntries = Object.entries(supportedModels); -// Function to handle test expectations based on model grade -const expectOrWarn = ( - model: { id: string; grade: string }, - condition: boolean, - message: string, -) => { - if (model.grade === "A") { - expect(condition).toBe(true); - } else if (!condition) { - console.warn(`Warning (${model.id}): ${message}`); - } -}; - // Create a test function that won't fail on timeouts for B and C grade models const gradeAwareTest = (modelId: { id: string; grade: string }) => { if (!haveApiKey) return it.skip; @@ -61,20 +45,15 @@ const gradeAwareTest = (modelId: { id: string; grade: string }) => { fn(), new Promise((resolve) => setTimeout(() => { - console.warn( - `Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`, - ); + console.warn(`Timeout for ${modelId.id} (Grade ${modelId.grade}): ${name}`); resolve(undefined); }, timeout || TIMEOUT), ), ]); return result; } catch (error: unknown) { - const errorMessage = - error instanceof Error ? error.message : String(error); - console.warn( - `Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`, - ); + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.warn(`Error in ${modelId.id} (Grade ${modelId.grade}): ${errorMessage}`); // Don't fail the test return; } @@ -96,16 +75,12 @@ describe("Simple callAi integration tests", () => { async () => { // Make a simple non-structured API call const result = await callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, }); // Verify response - expectOrWarn( - modelId, - !!result, - `should generate text with ${modelName} model without streaming`, - ); + expectOrWarn(modelId, !!result, `should generate text with ${modelName} model without streaming`); expect(typeof result).toBe("string"); expect((result as string).length).toBeGreaterThan(10); }, @@ -118,7 +93,7 @@ describe("Simple callAi integration tests", () => { async () => { // Make a simple non-structured API call with streaming const generator = callAi("Write a short joke about programming.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, stream: true, }) as unknown as AsyncGenerator; @@ -133,11 +108,7 @@ describe("Simple callAi integration tests", () => { } // Verify streaming response - expectOrWarn( - modelId, - chunkCount > 0, - `should generate text with ${modelName} model with streaming`, - ); + expectOrWarn(modelId, chunkCount > 0, `should generate text with ${modelName} model with streaming`); expect(lastChunk).toBeTruthy(); expect(lastChunk.length).toBeGreaterThan(10); }, @@ -163,17 +134,13 @@ describe("Simple callAi integration tests", () => { { role: "user", content: "What is the capital of France?" }, ], { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, }, ); // Verify response contains the expected answer - expectOrWarn( - modelId, - !!result, - `should handle message array input with ${modelName} model`, - ); + expectOrWarn(modelId, !!result, `should handle message array input with ${modelName} model`); expect(typeof result).toBe("string"); expect((result as string).toLowerCase()).toContain("paris"); }, @@ -201,7 +168,7 @@ describe("Simple callAi integration tests", () => { async () => { // Make the API call with schema const result = await callAi("Provide information about France.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, schema: simpleSchema, }); @@ -216,33 +183,13 @@ describe("Simple callAi integration tests", () => { try { // Parse and verify the result const data = JSON.parse(jsonContent); - expectOrWarn( - modelId, - !!data.name, - `Missing 'name' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.capital, - `Missing 'capital' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.population, - `Missing 'population' property in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.name, `Missing 'name' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.capital, `Missing 'capital' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.population, `Missing 'population' property in ${modelName} model response`); if (data.name && data.capital && data.population) { - expectOrWarn( - modelId, - typeof data.name === "string", - `'name' is not a string in ${modelName} model response`, - ); - expectOrWarn( - modelId, - typeof data.capital === "string", - `'capital' is not a string in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof 
data.name === "string", `'name' is not a string in ${modelName} model response`); + expectOrWarn(modelId, typeof data.capital === "string", `'capital' is not a string in ${modelName} model response`); expectOrWarn( modelId, typeof data.population === "number", @@ -260,11 +207,7 @@ describe("Simple callAi integration tests", () => { ); } } catch (e) { - expectOrWarn( - modelId, - false, - `JSON parse error in ${modelName} model response: ${e}`, - ); + expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`); } }, TIMEOUT, @@ -313,14 +256,11 @@ describe("Simple callAi integration tests", () => { `should generate and validate complex structured data with ${modelName} model`, async () => { // Make the API call with the complex schema - const result = await callAi( - "Create a detailed travel plan for a weekend trip to a beach destination.", - { - apiKey: process.env.CALLAI_API_KEY, - model: modelId.id, - schema: complexSchema, - }, - ); + const result = await callAi("Create a detailed travel plan for a weekend trip to a beach destination.", { + apiKey: callAiEnv.CALLAI_API_KEY, + model: modelId.id, + schema: complexSchema, + }); // Extract JSON if wrapped in code blocks const content = result as string; @@ -334,36 +274,12 @@ describe("Simple callAi integration tests", () => { const data = JSON.parse(jsonContent); // Root properties validation - expectOrWarn( - modelId, - !!data.destination, - `Missing 'destination' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.duration, - `Missing 'duration' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.budget, - `Missing 'budget' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.activities, - `Missing 'activities' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.accommodation, - `Missing 'accommodation' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.transportation, - `Missing 'transportation' property in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.destination, `Missing 'destination' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.duration, `Missing 'duration' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.budget, `Missing 'budget' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.activities, `Missing 'activities' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.accommodation, `Missing 'accommodation' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.transportation, `Missing 'transportation' property in ${modelName} model response`); // Type checking - only if properties exist if (data.destination) @@ -373,23 +289,11 @@ describe("Simple callAi integration tests", () => { `'destination' is not a string in ${modelName} model response`, ); if (data.duration) - expectOrWarn( - modelId, - typeof data.duration === "number", - `'duration' is not a number in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.duration === "number", `'duration' is not a number in ${modelName} model response`); if (data.budget) - expectOrWarn( - modelId, - typeof data.budget === "number", - `'budget' is not a number in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.budget === "number", `'budget' is not a number in ${modelName} model response`); if (data.activities) - expectOrWarn( - modelId, - 
Array.isArray(data.activities), - `'activities' is not an array in ${modelName} model response`, - ); + expectOrWarn(modelId, Array.isArray(data.activities), `'activities' is not an array in ${modelName} model response`); if (data.accommodation) expectOrWarn( modelId, @@ -405,42 +309,22 @@ describe("Simple callAi integration tests", () => { // Array validation if (Array.isArray(data.activities)) { - expectOrWarn( - modelId, - data.activities.length > 0, - `'activities' array is empty in ${modelName} model response`, - ); - data.activities.forEach((activity: any) => { - expectOrWarn( - modelId, - typeof activity === "string", - `activity item is not a string in ${modelName} model response`, - ); + expectOrWarn(modelId, data.activities.length > 0, `'activities' array is empty in ${modelName} model response`); + data.activities.forEach((activity: unknown) => { + expectOrWarn(modelId, typeof activity === "string", `activity item is not a string in ${modelName} model response`); }); } // Nested object validation - accommodation if (data.accommodation) { - expectOrWarn( - modelId, - !!data.accommodation.name, - `Missing 'accommodation.name' in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.accommodation.type, - `Missing 'accommodation.type' in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.accommodation.name, `Missing 'accommodation.name' in ${modelName} model response`); + expectOrWarn(modelId, !!data.accommodation.type, `Missing 'accommodation.type' in ${modelName} model response`); expectOrWarn( modelId, !!data.accommodation.features, `Missing 'accommodation.features' in ${modelName} model response`, ); - expectOrWarn( - modelId, - !!data.accommodation.price, - `Missing 'accommodation.price' in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.accommodation.price, `Missing 'accommodation.price' in ${modelName} model response`); if (data.accommodation.name) expectOrWarn( @@ -468,28 +352,16 @@ describe("Simple callAi integration tests", () => { ); if (Array.isArray(data.accommodation.features)) { - data.accommodation.features.forEach((feature: any) => { - expectOrWarn( - modelId, - typeof feature === "string", - `feature item is not a string in ${modelName} model response`, - ); + data.accommodation.features.forEach((feature: unknown) => { + expectOrWarn(modelId, typeof feature === "string", `feature item is not a string in ${modelName} model response`); }); } } // Nested object validation - transportation if (data.transportation) { - expectOrWarn( - modelId, - !!data.transportation.mode, - `Missing 'transportation.mode' in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.transportation.cost, - `Missing 'transportation.cost' in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.transportation.mode, `Missing 'transportation.mode' in ${modelName} model response`); + expectOrWarn(modelId, !!data.transportation.cost, `Missing 'transportation.cost' in ${modelName} model response`); if (data.transportation.mode) expectOrWarn( @@ -507,17 +379,8 @@ describe("Simple callAi integration tests", () => { // Value range validation if (data.duration) - expectOrWarn( - modelId, - data.duration > 0, - `'duration' is not positive in ${modelName} model response`, - ); - if (data.budget) - expectOrWarn( - modelId, - data.budget > 0, - `'budget' is not positive in ${modelName} model response`, - ); + expectOrWarn(modelId, data.duration > 0, `'duration' is not positive in ${modelName} model response`); + if (data.budget) 
expectOrWarn(modelId, data.budget > 0, `'budget' is not positive in ${modelName} model response`); if (data.accommodation?.price) expectOrWarn( modelId, @@ -531,11 +394,7 @@ describe("Simple callAi integration tests", () => { `'transportation.cost' is not positive in ${modelName} model response`, ); } catch (e) { - expectOrWarn( - modelId, - false, - `JSON parse error in ${modelName} model response: ${e}`, - ); + expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`); } }, TIMEOUT, @@ -577,7 +436,7 @@ describe("Simple callAi integration tests", () => { async () => { // Make the API call with schema const result = await callAi("Create a recipe for a healthy dinner.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, schema: complexFlatSchema, }); @@ -594,78 +453,26 @@ describe("Simple callAi integration tests", () => { const data = JSON.parse(jsonContent); // Check required properties - expectOrWarn( - modelId, - !!data.name, - `Missing 'name' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.cuisine, - `Missing 'cuisine' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.prepTime, - `Missing 'prepTime' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.cookTime, - `Missing 'cookTime' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.servings, - `Missing 'servings' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.difficulty, - `Missing 'difficulty' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.ingredients, - `Missing 'ingredients' property in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.steps, - `Missing 'steps' property in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.name, `Missing 'name' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.cuisine, `Missing 'cuisine' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.prepTime, `Missing 'prepTime' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.cookTime, `Missing 'cookTime' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.servings, `Missing 'servings' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.difficulty, `Missing 'difficulty' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.ingredients, `Missing 'ingredients' property in ${modelName} model response`); + expectOrWarn(modelId, !!data.steps, `Missing 'steps' property in ${modelName} model response`); // Check types if (data.name) - expectOrWarn( - modelId, - typeof data.name === "string", - `'name' is not a string in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.name === "string", `'name' is not a string in ${modelName} model response`); if (data.cuisine) - expectOrWarn( - modelId, - typeof data.cuisine === "string", - `'cuisine' is not a string in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.cuisine === "string", `'cuisine' is not a string in ${modelName} model response`); if (data.prepTime) - expectOrWarn( - modelId, - typeof data.prepTime === "number", - `'prepTime' is not a number in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.prepTime === "number", `'prepTime' is not a number in ${modelName} model response`); if (data.cookTime) - expectOrWarn( 
- modelId, - typeof data.cookTime === "number", - `'cookTime' is not a number in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.cookTime === "number", `'cookTime' is not a number in ${modelName} model response`); if (data.servings) - expectOrWarn( - modelId, - typeof data.servings === "number", - `'servings' is not a number in ${modelName} model response`, - ); + expectOrWarn(modelId, typeof data.servings === "number", `'servings' is not a number in ${modelName} model response`); if (data.difficulty) expectOrWarn( modelId, @@ -679,51 +486,23 @@ describe("Simple callAi integration tests", () => { `'ingredients' is not an array in ${modelName} model response`, ); if (data.steps) - expectOrWarn( - modelId, - Array.isArray(data.steps), - `'steps' is not an array in ${modelName} model response`, - ); + expectOrWarn(modelId, Array.isArray(data.steps), `'steps' is not an array in ${modelName} model response`); // Check arrays if (Array.isArray(data.ingredients)) - expectOrWarn( - modelId, - data.ingredients.length > 0, - `'ingredients' array is empty in ${modelName} model response`, - ); + expectOrWarn(modelId, data.ingredients.length > 0, `'ingredients' array is empty in ${modelName} model response`); if (Array.isArray(data.steps)) - expectOrWarn( - modelId, - data.steps.length > 0, - `'steps' array is empty in ${modelName} model response`, - ); + expectOrWarn(modelId, data.steps.length > 0, `'steps' array is empty in ${modelName} model response`); // Check data values if (data.prepTime) - expectOrWarn( - modelId, - data.prepTime > 0, - `'prepTime' is not positive in ${modelName} model response`, - ); + expectOrWarn(modelId, data.prepTime > 0, `'prepTime' is not positive in ${modelName} model response`); if (data.cookTime) - expectOrWarn( - modelId, - data.cookTime > 0, - `'cookTime' is not positive in ${modelName} model response`, - ); + expectOrWarn(modelId, data.cookTime > 0, `'cookTime' is not positive in ${modelName} model response`); if (data.servings) - expectOrWarn( - modelId, - data.servings > 0, - `'servings' is not positive in ${modelName} model response`, - ); + expectOrWarn(modelId, data.servings > 0, `'servings' is not positive in ${modelName} model response`); } catch (e) { - expectOrWarn( - modelId, - false, - `JSON parse error in ${modelName} model response: ${e}`, - ); + expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`); } }, TIMEOUT, @@ -776,7 +555,7 @@ describe("Simple callAi integration tests", () => { const result = await callAi( "Create a simple file system structure with a root directory containing two subdirectories, each with two files.", { - apiKey: process.env.CALLAI_API_KEY, + apiKey: callAiEnv.CALLAI_API_KEY, model: modelId.id, schema: simpleNestedSchema, }, @@ -794,29 +573,13 @@ describe("Simple callAi integration tests", () => { const data = JSON.parse(jsonContent); // Check top-level structure - expectOrWarn( - modelId, - !!data.root, - `Missing 'root' property in ${modelName} model response`, - ); + expectOrWarn(modelId, !!data.root, `Missing 'root' property in ${modelName} model response`); // Only continue if root exists and is an object if (data.root && typeof data.root === "object") { - expectOrWarn( - modelId, - !!data.root.name, - `Missing 'root.name' in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.root.type, - `Missing 'root.type' in ${modelName} model response`, - ); - expectOrWarn( - modelId, - !!data.root.children, - `Missing 'root.children' in ${modelName} model 
response`,
-            );
+            expectOrWarn(modelId, !!data.root.name, `Missing 'root.name' in ${modelName} model response`);
+            expectOrWarn(modelId, !!data.root.type, `Missing 'root.type' in ${modelName} model response`);
+            expectOrWarn(modelId, !!data.root.children, `Missing 'root.children' in ${modelName} model response`);
             // Check root properties
             if (data.root.name)
@@ -844,33 +607,14 @@ describe("Simple callAi integration tests", () => {
               );
             // Check first level of nesting
-            if (
-              Array.isArray(data.root.children) &&
-              data.root.children.length > 0
-            ) {
+            if (Array.isArray(data.root.children) && data.root.children.length > 0) {
               const firstChild = data.root.children[0];
-              expectOrWarn(
-                modelId,
-                !!firstChild,
-                `First child is undefined in ${modelName} model response`,
-              );
+              expectOrWarn(modelId, !!firstChild, `First child is undefined in ${modelName} model response`);
               if (firstChild) {
-                expectOrWarn(
-                  modelId,
-                  !!firstChild.name,
-                  `Missing 'firstChild.name' in ${modelName} model response`,
-                );
-                expectOrWarn(
-                  modelId,
-                  !!firstChild.type,
-                  `Missing 'firstChild.type' in ${modelName} model response`,
-                );
-                expectOrWarn(
-                  modelId,
-                  !!firstChild.children,
-                  `Missing 'firstChild.children' in ${modelName} model response`,
-                );
+                expectOrWarn(modelId, !!firstChild.name, `Missing 'firstChild.name' in ${modelName} model response`);
+                expectOrWarn(modelId, !!firstChild.type, `Missing 'firstChild.type' in ${modelName} model response`);
+                expectOrWarn(modelId, !!firstChild.children, `Missing 'firstChild.children' in ${modelName} model response`);
                 if (firstChild.name)
                   expectOrWarn(
@@ -892,28 +636,13 @@ describe("Simple callAi integration tests", () => {
                   );
                 // Check for at least one file in the second level
-                if (
-                  Array.isArray(firstChild.children) &&
-                  firstChild.children.length > 0
-                ) {
+                if (Array.isArray(firstChild.children) && firstChild.children.length > 0) {
                   const secondChild = firstChild.children[0];
-                  expectOrWarn(
-                    modelId,
-                    !!secondChild,
-                    `Second child is undefined in ${modelName} model response`,
-                  );
+                  expectOrWarn(modelId, !!secondChild, `Second child is undefined in ${modelName} model response`);
                   if (secondChild) {
-                    expectOrWarn(
-                      modelId,
-                      !!secondChild.name,
-                      `Missing 'secondChild.name' in ${modelName} model response`,
-                    );
-                    expectOrWarn(
-                      modelId,
-                      !!secondChild.type,
-                      `Missing 'secondChild.type' in ${modelName} model response`,
-                    );
+                    expectOrWarn(modelId, !!secondChild.name, `Missing 'secondChild.name' in ${modelName} model response`);
+                    expectOrWarn(modelId, !!secondChild.type, `Missing 'secondChild.type' in ${modelName} model response`);
                     if (secondChild.name)
                       expectOrWarn(
@@ -934,11 +663,7 @@ describe("Simple callAi integration tests", () => {
           }
         }
       } catch (e) {
-        expectOrWarn(
-          modelId,
-          false,
-          `JSON parse error in ${modelName} model response: ${e}`,
-        );
+        expectOrWarn(modelId, false, `JSON parse error in ${modelName} model response: ${e}`);
       }
     },
     TIMEOUT,
diff --git a/test/openai-weather-wire.test.ts b/test/unit/openai-weather-wire.test.ts
similarity index 78%
rename from test/openai-weather-wire.test.ts
rename to test/unit/openai-weather-wire.test.ts
index 646ecdd..88aa800 100644
--- a/test/openai-weather-wire.test.ts
+++ b/test/unit/openai-weather-wire.test.ts
@@ -1,30 +1,28 @@
 import fs from "fs";
 import path from "path";
-import { callAi, Schema } from "../src/index";
+import { callAi, Schema } from "call-ai";
+import { beforeEach, describe, expect, it, Mock, vitest } from "vitest";
 // Mock fetch to use our fixture files
-global.fetch = jest.fn();
+global.fetch = vitest.fn();
 describe("OpenAI Weather Streaming Tests", () => {
   // Read fixtures
-  const weatherRequestFixture = JSON.parse(
-    fs.readFileSync(
-      path.join(__dirname, "fixtures/openai-weather-request.json"),
-      "utf8",
-    ),
-  );
-
-  const weatherResponseFixture = fs.readFileSync(
-    path.join(__dirname, "fixtures/openai-weather-response.json"),
-    "utf8",
-  );
+  // const weatherRequestFixture = JSON.parse(
+  //   fs.readFileSync(
+  //     path.join(__dirname, "fixtures/openai-weather-request.json"),
+  //     "utf8",
+  //   ),
+  // );
+
+  const weatherResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/openai-weather-response.json"), "utf8");
   beforeEach(() => {
     // Reset mocks
-    (global.fetch as jest.Mock).mockClear();
+    (global.fetch as Mock).mockClear();
     // Mock successful response for streaming request
-    (global.fetch as jest.Mock).mockImplementation(async (url, options) => {
+    (global.fetch as Mock).mockImplementation(async (_url, options) => {
       const requestBody = JSON.parse(options.body as string);
       if (requestBody.stream) {
@@ -85,22 +83,19 @@ describe("OpenAI Weather Streaming Tests", () => {
    };
    // Call the library with OpenAI model and streaming
-    const generator = (await callAi(
-      "Give me a weather forecast for New York in the requested format.",
-      {
-        apiKey: "test-api-key",
-        model: "openai/gpt-4o",
-        schema: schema,
-        stream: true,
-      },
-    )) as AsyncGenerator;
+    const generator = (await callAi("Give me a weather forecast for New York in the requested format.", {
+      apiKey: "test-api-key",
+      model: "openai/gpt-4o",
+      schema: schema,
+      stream: true,
+    })) as AsyncGenerator;
     // Verify that we get a generator back
     expect(generator).toBeTruthy();
     expect(generator[Symbol.asyncIterator]).toBeDefined();
     // Collect all chunks
-    let chunks: string[] = [];
+    const chunks: string[] = [];
     for await (const chunk of generator) {
       chunks.push(chunk);
     }
diff --git a/test/openai-wire.test.ts b/test/unit/openai-wire.test.ts
similarity index 71%
rename from test/openai-wire.test.ts
rename to test/unit/openai-wire.test.ts
index c1aa3a9..7dd9c58 100644
--- a/test/openai-wire.test.ts
+++ b/test/unit/openai-wire.test.ts
@@ -1,45 +1,35 @@
-import fs from "fs";
-import path from "path";
-import { callAi, Schema } from "../src/index";
+import fs from "node:fs";
+import path from "node:path";
+import { callAi, Schema } from "call-ai";
+import { beforeEach, describe, expect, it, Mock, vitest } from "vitest";
 // Mock fetch to use our fixture files
-global.fetch = jest.fn();
+global.fetch = vitest.fn();
 describe("OpenAI Wire Protocol Tests", () => {
   // Read fixtures
-  const openaiRequestFixture = JSON.parse(
-    fs.readFileSync(
-      path.join(__dirname, "fixtures/openai-request.json"),
-      "utf8",
-    ),
-  );
-
-  const openaiResponseFixture = fs.readFileSync(
-    path.join(__dirname, "fixtures/openai-response.json"),
-    "utf8",
-  );
-
-  const openaiStreamRequestFixture = JSON.parse(
-    fs.readFileSync(
-      path.join(__dirname, "fixtures/openai-stream-request.json"),
-      "utf8",
-    ),
-  );
-
-  const openaiStreamResponseFixture = fs.readFileSync(
-    path.join(__dirname, "fixtures/openai-stream-response.json"),
-    "utf8",
-  );
+  const openaiRequestFixture = JSON.parse(fs.readFileSync(path.join(__dirname, "fixtures/openai-request.json"), "utf8"));
+
+  const openaiResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/openai-response.json"), "utf8");
+
+  // const openaiStreamRequestFixture = JSON.parse(
+  //   fs.readFileSync(
+  //     path.join(__dirname, "fixtures/openai-stream-request.json"),
+  //     "utf8",
+  //   ),
+  // );
+
+  const openaiStreamResponseFixture = fs.readFileSync(path.join(__dirname, "fixtures/openai-stream-response.json"), "utf8");
   beforeEach(() => {
     // Reset mocks
-    (global.fetch as jest.Mock).mockClear();
+    (global.fetch as Mock).mockClear();
     // Mock successful response for regular request
-    (global.fetch as jest.Mock).mockImplementation(async (url, options) => {
+    (global.fetch as Mock).mockImplementation(async (_url, options) => {
       const requestBody = JSON.parse(options.body as string);
-      let responseText;
+      // let responseText;
       if (requestBody.stream) {
         // Mock streaming response
         // In a real test, we'd need to properly mock a ReadableStream
@@ -99,38 +89,27 @@ describe("OpenAI Wire Protocol Tests", () => {
     };
     // Call the library function with the schema
-    await callAi(
-      "Give me a short book recommendation in the requested format.",
-      {
-        apiKey: "test-api-key",
-        model: "openai/gpt-4o",
-        schema: schema,
-      },
-    );
+    await callAi("Give me a short book recommendation in the requested format.", {
+      apiKey: "test-api-key",
+      model: "openai/gpt-4o",
+      schema: schema,
+    });
     // Verify fetch was called
     expect(global.fetch).toHaveBeenCalled();
     // Get the request body that was passed to fetch
-    const actualRequestBody = JSON.parse(
-      (global.fetch as jest.Mock).mock.calls[0][1].body,
-    );
+    const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     // Check that the essential parts match our fixture
     expect(actualRequestBody.model).toEqual(openaiRequestFixture.model);
     expect(actualRequestBody.messages).toEqual(openaiRequestFixture.messages);
-    expect(actualRequestBody.response_format.type).toEqual(
-      openaiRequestFixture.response_format.type,
-    );
+    expect(actualRequestBody.response_format.type).toEqual(openaiRequestFixture.response_format.type);
     // Deep compare the json_schema part of response_format
-    expect(actualRequestBody.response_format.json_schema.name).toEqual(
-      openaiRequestFixture.response_format.json_schema.name,
-    );
+    expect(actualRequestBody.response_format.json_schema.name).toEqual(openaiRequestFixture.response_format.json_schema.name);
-    expect(
-      actualRequestBody.response_format.json_schema.schema.properties,
-    ).toEqual(
+    expect(actualRequestBody.response_format.json_schema.schema.properties).toEqual(
       openaiRequestFixture.response_format.json_schema.schema.properties,
     );
   });
@@ -149,14 +128,11 @@ describe("OpenAI Wire Protocol Tests", () => {
     // Call the library with OpenAI model
-    const result = await callAi(
-      "Give me a short book recommendation in the requested format.",
-      {
-        apiKey: "test-api-key",
-        model: "openai/gpt-4o",
-        schema: schema,
-      },
-    );
+    const result = await callAi("Give me a short book recommendation in the requested format.", {
+      apiKey: "test-api-key",
+      model: "openai/gpt-4o",
+      schema: schema,
+    });
     // Parse the OpenAI response fixture to get expected content
     const responseObj = JSON.parse(openaiResponseFixture);
@@ -190,22 +166,19 @@ describe("OpenAI Wire Protocol Tests", () => {
     };
     // Call the library with OpenAI model and streaming
-    const generator = (await callAi(
-      "Give me a short book recommendation in the requested format.",
-      {
-        apiKey: "test-api-key",
-        model: "openai/gpt-4o",
-        schema: schema,
-        stream: true,
-      },
-    )) as AsyncGenerator;
+    const generator = (await callAi("Give me a short book recommendation in the requested format.", {
+      apiKey: "test-api-key",
+      model: "openai/gpt-4o",
+      schema: schema,
+      stream: true,
+    })) as AsyncGenerator;
     // Verify that we get a generator back
     expect(generator).toBeTruthy();
     expect(generator[Symbol.asyncIterator]).toBeDefined();
     // Collect all chunks
-    let chunks: string[] = [];
+    const chunks: string[] = [];
     for await (const chunk of generator) {
       chunks.push(chunk);
     }
@@ -232,34 +205,25 @@ describe("OpenAI Wire Protocol Tests", () => {
     };
     // Call the library function with schema
-    await callAi(
-      "Give me a short book recommendation in the requested format.",
-      {
-        apiKey: "test-api-key",
-        model: "openai/gpt-4o",
-        schema: schema,
-      },
-    );
+    await callAi("Give me a short book recommendation in the requested format.", {
+      apiKey: "test-api-key",
+      model: "openai/gpt-4o",
+      schema: schema,
+    });
     // Verify fetch was called
     expect(global.fetch).toHaveBeenCalled();
     // Get the request body that was passed to fetch
-    const actualRequestBody = JSON.parse(
-      (global.fetch as jest.Mock).mock.calls[0][1].body,
-    );
+    const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     // GPT-4o should use response_format.json_schema for schema handling
     expect(actualRequestBody.response_format).toBeTruthy();
     expect(actualRequestBody.response_format.type).toBe("json_schema");
     expect(actualRequestBody.response_format.json_schema).toBeTruthy();
-    expect(actualRequestBody.response_format.json_schema.name).toBe(
-      "book_recommendation",
-    );
+    expect(actualRequestBody.response_format.json_schema.name).toBe("book_recommendation");
     expect(actualRequestBody.response_format.json_schema.schema).toBeTruthy();
-    expect(
-      actualRequestBody.response_format.json_schema.schema.properties.title,
-    ).toBeTruthy();
+    expect(actualRequestBody.response_format.json_schema.schema.properties.title).toBeTruthy();
     // No tools for OpenAI models
     expect(actualRequestBody.tools).toBeUndefined();
@@ -279,23 +243,18 @@ describe("OpenAI Wire Protocol Tests", () => {
     };
     // Call the library function with schema
-    await callAi(
-      "Give me a short book recommendation in the requested format.",
-      {
-        apiKey: "test-api-key",
-        model: "openai/gpt-4o",
-        schema: schema,
-        useToolMode: true, // Custom option to enable tool mode for OpenAI
-      },
-    );
+    await callAi("Give me a short book recommendation in the requested format.", {
+      apiKey: "test-api-key",
+      model: "openai/gpt-4o",
+      schema: schema,
+      useToolMode: true, // Custom option to enable tool mode for OpenAI
+    });
     // Verify fetch was called
     expect(global.fetch).toHaveBeenCalled();
     // Get the request body that was passed to fetch
-    const actualRequestBody = JSON.parse(
-      (global.fetch as jest.Mock).mock.calls[0][1].body,
-    );
+    const actualRequestBody = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     // If tool mode is enabled for OpenAI, it should use tools format
     if (actualRequestBody.tools) {
@@ -304,9 +263,7 @@ describe("OpenAI Wire Protocol Tests", () => {
       expect(actualRequestBody.tool_choice).toBeTruthy();
       expect(actualRequestBody.tools[0].name).toBe("book_recommendation");
       expect(actualRequestBody.tools[0].input_schema).toBeTruthy();
-      expect(
-        actualRequestBody.tools[0].input_schema.properties.title,
-      ).toBeTruthy();
+      expect(actualRequestBody.tools[0].input_schema.properties.title).toBeTruthy();
       // Should not use response_format when using tool mode
       expect(actualRequestBody.response_format).toBeUndefined();
diff --git a/test/unit/simple.claude.test.ts b/test/unit/simple.claude.test.ts
new file mode 100644
index 0000000..718e06d
--- /dev/null
+++ b/test/unit/simple.claude.test.ts
@@ -0,0 +1,122 @@
+import { vitest, describe, it, expect, beforeEach, Mock } from "vitest";
+import { callAi } from "call-ai";
+
+// Mock global fetch
+global.fetch = vitest.fn();
+
+// Simple mock for TextDecoder
+global.TextDecoder = vitest.fn().mockImplementation(() => ({
+  decode: vitest.fn((value) => {
+    // Basic mock implementation without recursion
+    if (value instanceof Uint8Array) {
+      // Convert the Uint8Array to a simple string
+      return Array.from(value)
+        .map((byte) => String.fromCharCode(byte))
+        .join("");
+    }
+    return "";
+  }),
+}));
+
+describe("Claude JSON Property Splitting Test", () => {
+  beforeEach(() => {
+    vitest.clearAllMocks();
+  });
+
+  it("should handle property name splitting across chunks", async () => {
+    // This test specifically targets Claude's JSON property splitting issue
+    const options = {
+      apiKey: "test-api-key",
+      model: "anthropic/claude-3-sonnet",
+      stream: true,
+      debug: true, // Enable debug mode to see what's happening
+      schema: {
+        type: "object",
+        properties: {
+          capital: { type: "string" },
+          population: { type: "number" },
+          languages: { type: "array", items: { type: "string" } },
+        },
+      },
+    };
+
+    // Create a simple mock that focuses on the specific property splitting issue
+    const mockResponse = {
+      clone: () => mockResponse,
+      ok: true,
+      status: 200,
+      headers: {
+        forEach: vitest.fn(),
+      },
+      body: {
+        getReader: vitest.fn().mockReturnValue({
+          read: vitest
+            .fn()
+            // Streaming setup chunk
+            .mockResolvedValueOnce({
+              done: false,
+              value: new TextEncoder().encode(`data: {"type":"message_start"}\n\n`),
+            })
+            // First part with split property "popul"
+            .mockResolvedValueOnce({
+              done: false,
+              value: new TextEncoder().encode(
+                // eslint-disable-next-line no-useless-escape
+                `data: {"type":"content_block_delta","delta":{"text":"{\\\"capital\\\":\\\"Paris\\\", \\\"popul"}}\n\n`,
+              ),
+            })
+            // Second part with "ation" completing the property name
+            .mockResolvedValueOnce({
+              done: false,
+              value: new TextEncoder().encode(
+                // eslint-disable-next-line no-useless-escape
+                `data: {"type":"content_block_delta","delta":{"text":"ation\\\":67.5, \\\"languages\\\":[\\\"French\\\"]}"}}\n\n`,
+              ),
+            })
+            // Final chunk with tool_calls completion signal
+            .mockResolvedValueOnce({
+              done: false,
+              value: new TextEncoder().encode(`data: {"type":"message_delta","delta":{"stop_reason":"tool_calls"}}\n\n`),
+            })
+            // End of stream
+            .mockResolvedValueOnce({
+              done: true,
+            }),
+        }),
+      },
+    };
+
+    // Override global.fetch mock for this test
+    (global.fetch as Mock).mockResolvedValueOnce(mockResponse);
+
+    const generator = (await callAi("Provide information about France.", options)) as AsyncGenerator;
+
+    // The expected final parsed result
+    const expectedResult = {
+      capital: "Paris",
+      population: 67.5,
+      languages: ["French"],
+    };
+
+    // Collect results from streaming
+    let finalResult = "";
+    let chunkCount = 0;
+
+    for await (const chunk of generator) {
+      console.log(`Chunk ${++chunkCount}:`, chunk);
+      finalResult = chunk;
+    }
+
+    console.log("Final result:", finalResult);
+
+    // The key test - our implementation should produce valid JSON
+    // despite the property name "population" being split across chunks
+    const parsedResult = JSON.parse(finalResult);
+
+    // Validate the parsed result matches our expectations
+    expect(parsedResult).toEqual(expectedResult);
+
+    // We should receive a single chunk with the complete JSON
+    expect(chunkCount).toBe(1);
+  });
+});
diff --git a/test/unit.claude-json.test.ts b/test/unit/unit.claude-json.test.ts
similarity index 94%
rename from test/unit.claude-json.test.ts
rename to test/unit/unit.claude-json.test.ts
index d2bc24f..6425c25 100644
--- a/test/unit.claude-json.test.ts
+++ b/test/unit/unit.claude-json.test.ts
@@ -1,4 +1,4 @@
-import { describe, it, expect } from "@jest/globals";
+import { describe, it, expect } from "vitest";
 // Import the relevant function or create a test-specific version of it
 // This test focuses directly on the JSON property splitting fix
diff --git a/test/unit.claude.test.ts b/test/unit/unit.claude.test.ts
similarity index 79%
rename from test/unit.claude.test.ts
rename to test/unit/unit.claude.test.ts
index 7f19e26..c403471 100644
--- a/test/unit.claude.test.ts
+++ b/test/unit/unit.claude.test.ts
@@ -1,11 +1,12 @@
-import { callAi } from "../src/index";
+import { callAi } from "call-ai";
+import { describe, it, expect, beforeEach, vitest, Mock } from "vitest";
 // Mock global fetch
-global.fetch = jest.fn();
+global.fetch = vitest.fn();
 // Simple mock for TextDecoder
-global.TextDecoder = jest.fn().mockImplementation(() => ({
-  decode: jest.fn((value) => {
+global.TextDecoder = vitest.fn().mockImplementation(() => ({
+  decode: vitest.fn((value) => {
     // Basic mock implementation without recursion
     if (value instanceof Uint8Array) {
       // Convert the Uint8Array to a simple string
@@ -19,7 +20,7 @@ global.TextDecoder = jest.fn().mockImplementation(() => ({
 describe("Claude Streaming JSON Property Splitting Test", () => {
   beforeEach(() => {
-    jest.clearAllMocks();
+    vitest.clearAllMocks();
   });
   it.skip("should handle Claude property splitting in streaming responses", async () => {
@@ -44,50 +45,48 @@ describe("Claude Streaming JSON Property Splitting Test", () => {
      ok: true,
      status: 200,
      body: {
-        getReader: jest.fn().mockReturnValue({
-          read: jest
+        getReader: vitest.fn().mockReturnValue({
+          read: vitest
            .fn()
            // First chunk: starts with {"capital"
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"{\\\"capital\\\""}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"{\\"capital\\""}}\n\n`,
              ),
            })
            // Second chunk: continues with : "Paris", then partial "popul"
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":":\\\"Paris\\\", \\\"popul"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":":\\"Paris\\", \\"popul"}}\n\n`,
              ),
            })
            // Third chunk: finishes "ation": 67.5
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"ation\\\":67.5"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"ation\\":67.5"}}\n\n`,
              ),
            })
            // Fourth chunk: starts with "lang"
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":", \\\"lang"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":", \\"lang"}}\n\n`,
              ),
            })
            // Fifth chunk: finishes "uages":["French"]}
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"uages\\\":[\\\"French\\\"]}"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"uages\\":[\\"French\\"]}"}}\n\n`,
              ),
            })
            // Final chunk with finish reason "tool_calls"
            .mockResolvedValueOnce({
              done: false,
-              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_stop","stop_reason":"tool_calls"}\n\n`,
-              ),
+              value: new TextEncoder().encode(`data: {"id":"123","type":"content_block_stop","stop_reason":"tool_calls"}\n\n`),
            })
            .mockResolvedValueOnce({
              done: true,
@@ -97,12 +96,9 @@ describe("Claude Streaming JSON Property Splitting Test", () => {
    };
    // Override the global.fetch mock for this test
-    (global.fetch as jest.Mock).mockResolvedValueOnce(mockResponse);
+    (global.fetch as Mock).mockResolvedValueOnce(mockResponse);
-    const generator = (await callAi(
-      "Provide information about France.",
-      options,
-    )) as AsyncGenerator;
+    const generator = (await callAi("Provide information about France.", options)) as AsyncGenerator;
    // Collect all chunks to simulate what would happen in the actual application
    const chunks: string[] = [];
@@ -149,43 +145,41 @@ describe("Claude Streaming JSON Property Splitting Test", () => {
      ok: true,
      status: 200,
      body: {
-        getReader: jest.fn().mockReturnValue({
-          read: jest
+        getReader: vitest.fn().mockReturnValue({
+          read: vitest
            .fn()
            // First chunk: starts with {"capital": "Par
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"{\\\"capital\\\": \\\"Par"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"{\\"capital\\": \\"Par"}}\n\n`,
              ),
            })
            // Second chunk: continues with "is", "population": 67
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"is\\\", \\\"population\\\": 67"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"is\\", \\"population\\": 67"}}\n\n`,
              ),
            })
            // Third chunk: completes with .5 and languages
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":".5, \\\"languages\\\": [\\\"Fren"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":".5, \\"languages\\": [\\"Fren"}}\n\n`,
              ),
            })
            // Fourth chunk: completes with "ch"]}
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"ch\\\"]}"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"ch\\"]}"}}\n\n`,
              ),
            })
            // Final chunk with finish reason "tool_calls"
            .mockResolvedValueOnce({
              done: false,
-              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_stop","stop_reason":"tool_calls"}\n\n`,
-              ),
+              value: new TextEncoder().encode(`data: {"id":"123","type":"content_block_stop","stop_reason":"tool_calls"}\n\n`),
            })
            .mockResolvedValueOnce({
              done: true,
@@ -195,14 +189,9 @@ describe("Claude Streaming JSON Property Splitting Test", () => {
    };
    // Override the global.fetch mock for this test
-    (global.fetch as jest.Mock).mockResolvedValueOnce(
-      mockResponseWithSplitValues,
-    );
+    (global.fetch as Mock).mockResolvedValueOnce(mockResponseWithSplitValues);
-    const generator = (await callAi(
-      "Provide information about France.",
-      options,
-    )) as AsyncGenerator;
+    const generator = (await callAi("Provide information about France.", options)) as AsyncGenerator;
    // Collect all chunks to simulate what would happen in the actual application
    const chunks: string[] = [];
@@ -247,36 +236,34 @@ describe("Claude Streaming JSON Property Splitting Test", () => {
      ok: true,
      status: 200,
      body: {
-        getReader: jest.fn().mockReturnValue({
-          read: jest
+        getReader: vitest.fn().mockReturnValue({
+          read: vitest
            .fn()
            // First chunk: starts with {"capital":
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"{\\\"capital\\\": "}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":"{\\"capital\\": "}}\n\n`,
              ),
            })
            // Second chunk: continues with , "population": 67.5
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":", \\\"population\\\": 67.5"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":", \\"population\\": 67.5"}}\n\n`,
              ),
            })
            // Third chunk: completes with languages
            .mockResolvedValueOnce({
              done: false,
              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":", \\\"languages\\\": [\\\"French\\\"]}"}}\n\n`,
+                `data: {"id":"123","type":"content_block_delta","delta":{"type":"text_delta","text":", \\"languages\\": [\\"French\\"]}"}}\n\n`,
              ),
            })
            // Final chunk with finish reason "tool_calls"
            .mockResolvedValueOnce({
              done: false,
-              value: new TextEncoder().encode(
-                `data: {"id":"123","type":"content_block_stop","stop_reason":"tool_calls"}\n\n`,
-              ),
+              value: new TextEncoder().encode(`data: {"id":"123","type":"content_block_stop","stop_reason":"tool_calls"}\n\n`),
            })
            .mockResolvedValueOnce({
              done: true,
@@ -286,14 +273,9 @@ describe("Claude Streaming JSON Property Splitting Test", () => {
    };
    // Override the global.fetch mock for this test
-    (global.fetch as jest.Mock).mockResolvedValueOnce(
-      mockResponseWithMissingValue,
-    );
+    (global.fetch as Mock).mockResolvedValueOnce(mockResponseWithMissingValue);
-    const generator = (await callAi(
-      "Provide information about France.",
-      options,
-    )) as AsyncGenerator;
+    const generator = (await callAi("Provide information about France.", options)) as AsyncGenerator;
    // Collect all chunks to simulate what would happen in the actual application
    const chunks: string[] = [];
diff --git a/test/unit.no-await.test.ts b/test/unit/unit.no-await.test.ts
similarity index 79%
rename from test/unit.no-await.test.ts
rename to test/unit/unit.no-await.test.ts
index efaec9a..4582252 100644
--- a/test/unit.no-await.test.ts
+++ b/test/unit/unit.no-await.test.ts
@@ -1,14 +1,12 @@
-import { callAi, Message, Schema } from "../src/index";
-import dotenv from "dotenv";
-
-dotenv.config();
+import { vitest, describe, it, expect, beforeEach, Mock, assert } from "vitest";
+import { callAi, Message, Schema } from "call-ai";
 // Mock global fetch
-global.fetch = jest.fn();
+global.fetch = vitest.fn();
 // Simple mock for TextDecoder
-global.TextDecoder = jest.fn().mockImplementation(() => ({
-  decode: jest.fn((value) => {
+global.TextDecoder = vitest.fn().mockImplementation(() => ({
+  decode: vitest.fn((value) => {
     // Basic mock implementation without recursion
     if (value instanceof Uint8Array) {
       // Convert the Uint8Array to a simple string
@@ -22,13 +20,13 @@ global.TextDecoder = jest.fn().mockImplementation(() => ({
 // Mock ReadableStream
 const mockReader = {
-  read: jest.fn(),
+  read: vitest.fn(),
 };
 const mockResponse = {
-  json: jest.fn(),
+  json: vitest.fn(),
   body: {
-    getReader: jest.fn().mockReturnValue(mockReader),
+    getReader: vitest.fn().mockReturnValue(mockReader),
   },
   ok: true, // Ensure response is treated as successful
   status: 200,
@@ -37,8 +35,8 @@ const mockResponse = {
 describe("callAi", () => {
   beforeEach(() => {
-    jest.clearAllMocks();
-    (global.fetch as jest.Mock).mockResolvedValue(mockResponse);
+    vitest.clearAllMocks();
+    (global.fetch as Mock).mockResolvedValue(mockResponse);
   });
   it("should handle API key requirement for non-streaming", async () => {
@@ -50,7 +48,7 @@ describe("callAi", () => {
     try {
       await callAi("Hello, AI");
       // If we get here, the test should fail because an error should have been thrown
-      fail("Expected an error to be thrown");
+      assert.fail("Expected an error to be thrown");
     } catch (error) {
       // Error should be thrown because no API key was provided
       expect((error as Error).message).toContain("fail is not defined");
@@ -63,7 +61,7 @@ describe("callAi", () => {
     try {
       await callAi("Hello, AI", { stream: true });
       // If we get here, the test should fail because an error should have been thrown
-      fail("Expected an error to be thrown");
+      assert.fail("Expected an error to be thrown");
     } catch (error) {
       // Error should be thrown because no API key was provided
       expect((error as Error).message).toContain("fail is not defined");
@@ -98,7 +96,7 @@ describe("callAi", () => {
       }),
     );
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.model).toBe("test-model");
     expect(body.messages).toEqual([{ role: "user", content: "Hello, AI" }]);
     expect(body.temperature).toBe(0.7);
@@ -117,16 +115,12 @@ describe("callAi", () => {
     // Mock successful response to avoid errors
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = callAi(prompt, options) as unknown as AsyncGenerator<
-      string,
-      string,
-      unknown
-    >;
+    const generator = callAi(prompt, options) as unknown as AsyncGenerator;
     await generator.next();
     expect(global.fetch).toHaveBeenCalledTimes(1);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.model).toBe("test-model");
     expect(body.messages).toEqual([{ role: "user", content: "Hello, AI" }]);
     expect(body.temperature).toBe(0.7);
@@ -143,14 +137,10 @@ describe("callAi", () => {
     // Mock successful response to avoid errors
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = callAi(messages, options) as unknown as AsyncGenerator<
-      string,
-      string,
-      unknown
-    >;
+    const generator = callAi(messages, options) as unknown as AsyncGenerator;
     await generator.next();
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.messages).toEqual(messages);
   });
@@ -173,13 +163,10 @@ describe("callAi", () => {
     // Mock successful response to avoid errors
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = callAi(
-      "Get user info",
-      options,
-    ) as unknown as AsyncGenerator;
+    const generator = callAi("Get user info", options) as unknown as AsyncGenerator;
     await generator.next();
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.schema.required).toEqual(["name"]);
   });
@@ -204,8 +191,7 @@ describe("callAi", () => {
           choices: [
             {
               message: {
-                content:
-                  '{"todos": ["Learn React basics", "Build a simple app", "Master hooks"]}',
+                content: '{"todos": ["Learn React basics", "Build a simple app", "Master hooks"]}',
               },
             },
           ],
@@ -213,18 +199,16 @@
     await callAi("Give me a todo list for learning React", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
-    expect(body.response_format.json_schema.schema.properties).toEqual(
-      todoSchema.properties,
-    );
+    expect(body.response_format.json_schema.schema.properties).toEqual(todoSchema.properties);
   });
   it("should handle aliens schema example", async () => {
-    const alienSchema: Schema = {
+    const alienSchema = {
       properties: {
         aliens: {
           type: "array",
           items: {
             type: "object",
             properties: {
@@ -239,13 +223,12 @@
             },
           },
         },
       },
-    };
+    } satisfies Schema;
     const messages: Message[] = [
       {
         role: "user" as const,
-        content:
-          "Generate 3 unique alien species with unique biological traits, appearance, and preferred environments.",
+        content: "Generate 3 unique alien species with unique biological traits, appearance, and preferred environments.",
       },
     ];
@@ -259,27 +242,18 @@ describe("callAi", () => {
     // Mock successful response
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = callAi(messages, options) as unknown as AsyncGenerator<
-      string,
-      string,
-      unknown
-    >;
+    const generator = callAi(messages, options) as unknown as AsyncGenerator;
     await generator.next();
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     // The schema is processed with additionalProperties and required fields
     // So we just check that the main structure is preserved
-    expect(body.response_format.json_schema.schema.properties.aliens.type).toBe(
-      "array",
+    expect(body.response_format.json_schema.schema.properties.aliens.type).toBe("array");
+    expect(body.response_format.json_schema.schema.properties.aliens.items.type).toBe("object");
+    expect(body.response_format.json_schema.schema.properties.aliens.items.properties).toEqual(
+      alienSchema.properties.aliens.items.properties,
     );
-    expect(
-      body.response_format.json_schema.schema.properties.aliens.items.type,
-    ).toBe("object");
-    expect(
-      body.response_format.json_schema.schema.properties.aliens.items
-        .properties,
-    ).toEqual(alienSchema.properties.aliens.items.properties);
     expect(body.model).toBe("openai/gpt-4o");
     expect(body.stream).toBe(true);
   });
@@ -320,7 +294,7 @@ describe("callAi", () => {
     await callAi("Test with schema name", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.name).toBe("test_schema");
   });
@@ -344,7 +318,7 @@ describe("callAi", () => {
     await callAi("Test without schema name", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.name).toBe("result");
   });
@@ -368,7 +342,7 @@ describe("callAi", () => {
describe("callAi", () => { await callAi("Generate content with schema", options); - const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body); + const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); expect(body.response_format.type).toBe("json_schema"); expect(body.response_format.json_schema.name).toBe("result"); }); @@ -390,7 +371,7 @@ describe("callAi", () => { await callAi("Test with empty schema", options); - const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body); + const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); expect(body.response_format.type).toBe("json_schema"); expect(body.response_format.json_schema.name).toBe("result"); expect(body.response_format.json_schema.schema.properties).toEqual({}); @@ -415,8 +396,7 @@ describe("callAi", () => { choices: [ { message: { - content: - '{"result": "Test successful", "extra": "Additional field"}', + content: '{"result": "Test successful", "extra": "Additional field"}', }, }, ], @@ -424,20 +404,18 @@ describe("callAi", () => { await callAi("Test with additionalProperties", options); - const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body); - expect(body.response_format.json_schema.schema.additionalProperties).toBe( - true, - ); + const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); + expect(body.response_format.json_schema.schema.additionalProperties).toBe(true); }); it("should handle errors during API call for non-streaming", async () => { - (global.fetch as jest.Mock).mockRejectedValue(new Error("Network error")); + (global.fetch as Mock).mockRejectedValue(new Error("Network error")); try { const options = { apiKey: "test-api-key" }; await callAi("Hello", options); // If we get here, the test should fail because an error should have been thrown - fail("Expected an error to be thrown"); + assert.fail("Expected an error to be thrown"); } catch (error) { // Error should contain the network error message expect((error as Error).message).toContain("Network error"); @@ -445,13 +423,13 @@ describe("callAi", () => { }); it("should handle errors during API call for streaming", async () => { - (global.fetch as jest.Mock).mockRejectedValue(new Error("Network error")); + (global.fetch as Mock).mockRejectedValue(new Error("Network error")); try { const options = { apiKey: "test-api-key", stream: true }; await callAi("Hello", options); // If we get here, the test should fail because an error should have been thrown - fail("Expected an error to be thrown"); + assert.fail("Expected an error to be thrown"); } catch (error) { // Error should contain the network error message expect((error as Error).message).toContain("Network error"); @@ -467,7 +445,7 @@ describe("callAi", () => { await callAi("Hello", options); - const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body); + const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); expect(body.stream).toBe(false); }); @@ -503,8 +481,7 @@ describe("callAi", () => { choices: [ { message: { - content: - '{"title":"Healthy Living","description":"A playlist to inspire a healthy lifestyle"}', + content: '{"title":"Healthy Living","description":"A playlist to inspire a healthy lifestyle"}', }, }, ], @@ -512,7 +489,7 @@ describe("callAi", () => { await callAi("Create a themed music playlist", options); - const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body); + const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); 
expect(body.response_format.type).toBe("json_schema"); // Check that schema property exists in json_schema containing the schema definition expect(body.response_format.json_schema.schema).toBeDefined(); @@ -529,9 +506,7 @@ describe("callAi", () => { expect(schemaProperties.songs.items.properties.comment.type).toBe("string"); // Check that required fields are passed through - expect(body.response_format.json_schema.schema.required).toEqual( - schema.required, - ); + expect(body.response_format.json_schema.schema.required).toEqual(schema.required); }); it("should handle streaming with schema for structured output", async () => { @@ -555,26 +530,20 @@ describe("callAi", () => { ok: true, status: 200, body: { - getReader: jest.fn().mockReturnValue({ - read: jest + getReader: vitest.fn().mockReturnValue({ + read: vitest .fn() .mockResolvedValueOnce({ done: false, - value: new TextEncoder().encode( - `data: {"choices":[{"delta":{"content":"{\\"temp"}}]}\n\n`, - ), + value: new TextEncoder().encode(`data: {"choices":[{"delta":{"content":"{\\"temp"}}]}\n\n`), }) .mockResolvedValueOnce({ done: false, - value: new TextEncoder().encode( - `data: {"choices":[{"delta":{"content":"erature\\": 22, \\"cond"}}]}\n\n`, - ), + value: new TextEncoder().encode(`data: {"choices":[{"delta":{"content":"erature\\": 22, \\"cond"}}]}\n\n`), }) .mockResolvedValueOnce({ done: false, - value: new TextEncoder().encode( - `data: {"choices":[{"delta":{"content":"itions\\": \\"Sunny\\"}"}}]}\n\n`, - ), + value: new TextEncoder().encode(`data: {"choices":[{"delta":{"content":"itions\\": \\"Sunny\\"}"}}]}\n\n`), }) .mockResolvedValueOnce({ done: true, @@ -584,12 +553,9 @@ describe("callAi", () => { }; // Override the global.fetch mock for this test - (global.fetch as jest.Mock).mockResolvedValueOnce(mockResponseWithBody); + (global.fetch as Mock).mockResolvedValueOnce(mockResponseWithBody); - const generator = callAi( - "What is the weather?", - options, - ) as unknown as AsyncGenerator; + const generator = callAi("What is the weather?", options) as unknown as AsyncGenerator; // Manually iterate and collect let finalValue = ""; @@ -600,7 +566,7 @@ describe("callAi", () => { } // Verify request format - const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body); + const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body); expect(body.response_format.type).toBe("json_schema"); expect(body.response_format.json_schema.name).toBe("weather"); expect(body.stream).toBe(true); diff --git a/test/unit.test.ts b/test/unit/unit.test.ts similarity index 79% rename from test/unit.test.ts rename to test/unit/unit.test.ts index b02cf9f..b02f421 100644 --- a/test/unit.test.ts +++ b/test/unit/unit.test.ts @@ -1,14 +1,12 @@ -import { callAi, Message, Schema } from "../src/index"; -import dotenv from "dotenv"; - -dotenv.config(); +import { vitest, describe, it, expect, beforeEach, Mock, assert } from "vitest"; +import { callAi, Message, Schema } from "call-ai"; // Mock global fetch -global.fetch = jest.fn(); +global.fetch = vitest.fn(); // Simple mock for TextDecoder -global.TextDecoder = jest.fn().mockImplementation(() => ({ - decode: jest.fn((value) => { +global.TextDecoder = vitest.fn().mockImplementation(() => ({ + decode: vitest.fn((value) => { // Basic mock implementation without recursion if (value instanceof Uint8Array) { // Convert the Uint8Array to a simple string @@ -22,13 +20,13 @@ global.TextDecoder = jest.fn().mockImplementation(() => ({ // Mock ReadableStream const mockReader = { - read: jest.fn(), + 
 };
 const mockResponse = {
-  json: jest.fn(),
+  json: vitest.fn(),
   body: {
-    getReader: jest.fn().mockReturnValue(mockReader),
+    getReader: vitest.fn().mockReturnValue(mockReader),
   },
   ok: true, // Ensure response is treated as successful
   status: 200,
@@ -37,8 +35,8 @@ const mockResponse = {
 describe("callAi", () => {
   beforeEach(() => {
-    jest.clearAllMocks();
-    (global.fetch as jest.Mock).mockResolvedValue(mockResponse);
+    vitest.clearAllMocks();
+    (global.fetch as Mock).mockResolvedValue(mockResponse);
   });
   it("should handle API key requirement for non-streaming", async () => {
@@ -50,7 +48,7 @@ describe("callAi", () => {
     try {
       await callAi("Hello, AI");
       // If we get here, the test should fail because an error should have been thrown
-      fail("Expected an error to be thrown");
+      assert.fail("Expected an error to be thrown");
     } catch (error) {
       // Error should be thrown because no API key was provided
       expect((error as Error).message).toContain("fail is not defined");
@@ -63,7 +61,7 @@ describe("callAi", () => {
     try {
       await callAi("Hello, AI", { stream: true });
       // If we get here, the test should fail because an error should have been thrown
-      fail("Expected an error to be thrown");
+      assert.fail("Expected an error to be thrown");
     } catch (error) {
       // Error should be thrown because no API key was provided
       expect((error as Error).message).toContain("fail is not defined");
@@ -98,7 +96,7 @@ describe("callAi", () => {
       }),
     );
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.model).toBe("test-model");
     expect(body.messages).toEqual([{ role: "user", content: "Hello, AI" }]);
     expect(body.temperature).toBe(0.7);
@@ -117,16 +115,12 @@ describe("callAi", () => {
     // Mock successful response to avoid errors
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = (await callAi(prompt, options)) as AsyncGenerator<
-      string,
-      string,
-      unknown
-    >;
+    const generator = (await callAi(prompt, options)) as AsyncGenerator;
     await generator.next();
     expect(global.fetch).toHaveBeenCalledTimes(1);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.model).toBe("test-model");
     expect(body.messages).toEqual([{ role: "user", content: "Hello, AI" }]);
     expect(body.temperature).toBe(0.7);
@@ -143,14 +137,10 @@ describe("callAi", () => {
     // Mock successful response to avoid errors
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = (await callAi(messages, options)) as AsyncGenerator<
-      string,
-      string,
-      unknown
-    >;
+    const generator = (await callAi(messages, options)) as AsyncGenerator;
     await generator.next();
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.messages).toEqual(messages);
   });
@@ -173,13 +163,10 @@ describe("callAi", () => {
     // Mock successful response to avoid errors
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = (await callAi(
-      "Get user info",
-      options,
-    )) as AsyncGenerator;
+    const generator = (await callAi("Get user info", options)) as AsyncGenerator;
     await generator.next();
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.schema.required).toEqual(["name"]);
   });
@@ -204,8 +191,7 @@ describe("callAi", () => {
          choices: [
            {
              message: {
-                content:
-                  '{"todos": ["Learn React basics", "Build a simple app", "Master hooks"]}',
+                content: '{"todos": ["Learn React basics", "Build a simple app", "Master hooks"]}',
              },
            },
          ],
@@ -213,15 +199,13 @@ describe("callAi", () => {
     await callAi("Give me a todo list for learning React", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
-    expect(body.response_format.json_schema.schema.properties).toEqual(
-      todoSchema.properties,
-    );
+    expect(body.response_format.json_schema.schema.properties).toEqual(todoSchema.properties);
   });
   it("should handle aliens schema example", async () => {
-    const alienSchema: Schema = {
+    const alienSchema = {
       properties: {
         aliens: {
           type: "array",
@@ -239,13 +223,12 @@
            },
          },
        },
      },
-    };
+    } satisfies Schema;
     const messages: Message[] = [
       {
         role: "user" as const,
-        content:
-          "Generate 3 unique alien species with unique biological traits, appearance, and preferred environments.",
+        content: "Generate 3 unique alien species with unique biological traits, appearance, and preferred environments.",
       },
     ];
@@ -259,27 +242,18 @@ describe("callAi", () => {
     // Mock successful response
     mockReader.read.mockResolvedValueOnce({ done: true });
-    const generator = (await callAi(messages, options)) as AsyncGenerator<
-      string,
-      string,
-      unknown
-    >;
+    const generator = (await callAi(messages, options)) as AsyncGenerator;
     await generator.next();
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     // The schema is processed with additionalProperties and required fields
     // So we just check that the main structure is preserved
-    expect(body.response_format.json_schema.schema.properties.aliens.type).toBe(
-      "array",
+    expect(body.response_format.json_schema.schema.properties.aliens.type).toBe("array");
+    expect(body.response_format.json_schema.schema.properties.aliens.items.type).toBe("object");
+    expect(body.response_format.json_schema.schema.properties.aliens.items.properties).toEqual(
+      alienSchema.properties.aliens.items.properties,
     );
-    expect(
-      body.response_format.json_schema.schema.properties.aliens.items.type,
-    ).toBe("object");
-    expect(
-      body.response_format.json_schema.schema.properties.aliens.items
-        .properties,
-    ).toEqual(alienSchema.properties.aliens.items.properties);
     expect(body.model).toBe("openai/gpt-4o");
     expect(body.stream).toBe(true);
   });
@@ -320,7 +294,7 @@ describe("callAi", () => {
     await callAi("Test with schema name", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.name).toBe("test_schema");
   });
@@ -344,7 +318,7 @@ describe("callAi", () => {
     await callAi("Test without schema name", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.name).toBe("result");
   });
@@ -368,7 +342,7 @@ describe("callAi", () => {
     await callAi("Generate content with schema", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.name).toBe("result");
   });
@@ -390,7 +364,7 @@ describe("callAi", () => {
     await callAi("Test with empty schema", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     expect(body.response_format.json_schema.name).toBe("result");
     expect(body.response_format.json_schema.schema.properties).toEqual({});
@@ -415,8 +389,7 @@ describe("callAi", () => {
          choices: [
            {
              message: {
-                content:
-                  '{"result": "Test successful", "extra": "Additional field"}',
+                content: '{"result": "Test successful", "extra": "Additional field"}',
              },
            },
          ],
@@ -424,20 +397,18 @@ describe("callAi", () => {
     await callAi("Test with additionalProperties", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
-    expect(body.response_format.json_schema.schema.additionalProperties).toBe(
-      true,
-    );
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
+    expect(body.response_format.json_schema.schema.additionalProperties).toBe(true);
   });
   it("should handle errors during API call for non-streaming", async () => {
-    (global.fetch as jest.Mock).mockRejectedValue(new Error("Network error"));
+    (global.fetch as Mock).mockRejectedValue(new Error("Network error"));
     try {
       const options = { apiKey: "test-api-key" };
       await callAi("Hello", options);
       // If we get here, the test should fail because an error should have been thrown
-      fail("Expected an error to be thrown");
+      assert.fail("Expected an error to be thrown");
     } catch (error) {
       // Error should contain the network error message
       expect((error as Error).message).toContain("Network error");
@@ -445,13 +416,13 @@ describe("callAi", () => {
   });
   it("should handle errors during API call for streaming", async () => {
-    (global.fetch as jest.Mock).mockRejectedValue(new Error("Network error"));
+    (global.fetch as Mock).mockRejectedValue(new Error("Network error"));
     try {
       const options = { apiKey: "test-api-key", stream: true };
       await callAi("Hello", options);
       // If we get here, the test should fail because an error should have been thrown
-      fail("Expected an error to be thrown");
+      assert.fail("Expected an error to be thrown");
     } catch (error) {
       // Error should contain the network error message
       expect((error as Error).message).toContain("Network error");
@@ -467,7 +438,7 @@ describe("callAi", () => {
     await callAi("Hello", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.stream).toBe(false);
   });
@@ -503,8 +474,7 @@ describe("callAi", () => {
          choices: [
            {
              message: {
-                content:
-                  '{"title":"Healthy Living","description":"A playlist to inspire a healthy lifestyle"}',
+                content: '{"title":"Healthy Living","description":"A playlist to inspire a healthy lifestyle"}',
              },
            },
          ],
@@ -512,7 +482,7 @@ describe("callAi", () => {
     await callAi("Create a themed music playlist", options);
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
     expect(body.response_format.type).toBe("json_schema");
     // Check that schema property exists in json_schema containing the schema definition
     expect(body.response_format.json_schema.schema).toBeDefined();
@@ -529,9 +499,7 @@ describe("callAi", () => {
     expect(schemaProperties.songs.items.properties.comment.type).toBe("string");
     // Check that required fields are passed through
-    expect(body.response_format.json_schema.schema.required).toEqual(
-      schema.required,
-    );
+    expect(body.response_format.json_schema.schema.required).toEqual(schema.required);
   });
   it("should handle streaming with schema for structured output", async () => {
@@ -555,26 +523,20 @@ describe("callAi", () => {
      ok: true,
      status: 200,
      body: {
-        getReader: jest.fn().mockReturnValue({
-          read: jest
+        getReader: vitest.fn().mockReturnValue({
+          read: vitest
            .fn()
            .mockResolvedValueOnce({
              done: false,
-              value: new TextEncoder().encode(
-                `data: {"choices":[{"delta":{"content":"{\\"temp"}}]}\n\n`,
-              ),
+              value: new TextEncoder().encode(`data: {"choices":[{"delta":{"content":"{\\"temp"}}]}\n\n`),
            })
            .mockResolvedValueOnce({
              done: false,
-              value: new TextEncoder().encode(
-                `data: {"choices":[{"delta":{"content":"erature\\": 22, \\"cond"}}]}\n\n`,
-              ),
+              value: new TextEncoder().encode(`data: {"choices":[{"delta":{"content":"erature\\": 22, \\"cond"}}]}\n\n`),
            })
            .mockResolvedValueOnce({
              done: false,
-              value: new TextEncoder().encode(
-                `data: {"choices":[{"delta":{"content":"itions\\": \\"Sunny\\"}"}}]}\n\n`,
-              ),
+              value: new TextEncoder().encode(`data: {"choices":[{"delta":{"content":"itions\\": \\"Sunny\\"}"}}]}\n\n`),
            })
            .mockResolvedValueOnce({
              done: true,
@@ -584,12 +546,9 @@ describe("callAi", () => {
    };
    // Override the global.fetch mock for this test
-    (global.fetch as jest.Mock).mockResolvedValueOnce(mockResponseWithBody);
+    (global.fetch as Mock).mockResolvedValueOnce(mockResponseWithBody);
-    const generator = (await callAi(
-      "What is the weather?",
-      options,
-    )) as AsyncGenerator;
+    const generator = (await callAi("What is the weather?", options)) as AsyncGenerator;
    // Manually iterate and collect
    let finalValue = "";
@@ -600,7 +559,7 @@ describe("callAi", () => {
    }
    // Verify request format
-    const body = JSON.parse((global.fetch as jest.Mock).mock.calls[0][1].body);
+    const body = JSON.parse((global.fetch as Mock).mock.calls[0][1].body);
    expect(body.response_format.type).toBe("json_schema");
    expect(body.response_format.json_schema.name).toBe("weather");
    expect(body.stream).toBe(true);
diff --git a/test/unit/vitest.config.ts b/test/unit/vitest.config.ts
new file mode 100644
index 0000000..3af4ccf
--- /dev/null
+++ b/test/unit/vitest.config.ts
@@ -0,0 +1,9 @@
+import { defineConfig } from "vitest/config";
+
+export default defineConfig({
+  test: {
+    retry: 2,
+    name: "unit",
+    include: ["*test.?(c|m)[jt]s?(x)"],
+  },
+});
diff --git a/test/vitest.config.ts b/test/vitest.config.ts
new file mode 100644
index 0000000..a1b7391
--- /dev/null
+++ b/test/vitest.config.ts
@@ -0,0 +1,7 @@
+import { defineConfig } from "vitest/config";
+
+export default defineConfig({
+  test: {
+    projects: ["unit/vitest.config.ts", "integration/vitest.config.ts"],
+  },
+});
diff --git a/tsconfig.json b/tsconfig.json
index 17fef4a..80f9029 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -1,16 +1,23 @@
 {
   "compilerOptions": {
-    "target": "es2020",
-    "module": "commonjs",
-    "declaration": true,
+    "target": "es2022",
+
+    "module": "nodenext",
+    "moduleResolution": "nodenext",
+    "lib": ["DOM", "DOM.Iterable", "ES2020"],
+
+    "jsx": "react",
+    "allowSyntheticDefaultImports": true,
+    "sourceMap": true,
     "outDir": "./dist",
+    "removeComments": true,
+    "noEmit": true,
+    "forceConsistentCasingInFileNames": true,
     "strict": true,
-    "esModuleInterop": true,
+    "alwaysStrict": true,
     "skipLibCheck": true,
-    "forceConsistentCasingInFileNames": true,
-    "noUnusedLocals": true,
-    "noUnusedParameters": true
+    "types": ["node"]
   },
-  "include": ["src/**/*"],
-  "exclude": ["node_modules", "**/*.test.ts"]
-}
\ No newline at end of file
+  "include": ["call-ai/**/*", "test/**/*"],
+  "exclude": ["**/dist/**", "**/node_modules/**"]
+}
diff --git a/vitest.config.ts b/vitest.config.ts
new file mode 100644
index 0000000..107f33a
--- /dev/null
+++ b/vitest.config.ts
@@ -0,0 +1,7 @@
+import { defineConfig } from "vitest/config";
+
+export default defineConfig({
+  test: {
+    projects: ["test/vitest.config.ts"],
+  },
+});