This repository was archived by the owner on Aug 23, 2025. It is now read-only.

Commit 2df325e

wip: first run of cleanup
1 parent dceb377 commit 2df325e


55 files changed: 852 additions, 785 deletions

eslint.config.mjs

Lines changed: 50 additions & 0 deletions
@@ -0,0 +1,50 @@
+import eslint from "@eslint/js";
+import tseslint from "typescript-eslint";
+import importPlugin from "eslint-plugin-import";
+
+const opts = tseslint.config(
+  eslint.configs.recommended,
+  // ...tseslint.configs.recommended,
+  ...tseslint.configs.strict,
+  ...tseslint.configs.stylistic,
+  {
+    languageOptions: {
+      globals: {
+        queueMicrotask: "readonly",
+      },
+    },
+  },
+  {
+    ignores: [
+      "babel.config.cjs",
+      "jest.config.js",
+      "**/dist/",
+      "**/pubdir/",
+      "**/node_modules/",
+      "**/scripts/",
+      "**/examples/",
+      "scripts/",
+      "smoke/react/",
+      "src/missingTypes/lib.deno.d.ts",
+      "**/.cache/**",
+      "**/.esm-cache/**",
+      "**/.wrangler/**",
+    ],
+  },
+  {
+    plugins: {
+      import: importPlugin,
+    },
+    rules: {
+      // "no-console": ["warn"],
+      "import/no-duplicates": ["error"],
+    },
+  },
+  {
+    rules: {
+      // "no-restricted-globals": ["error", "URL", "TextDecoder", "TextEncoder"],
+    },
+  },
+);
+
+export default opts;
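This flat config is what the new `lint` script in package.json (below) invokes via `eslint .`. As a rough sketch of how such a config composes — not part of this commit; the file name, glob, and rule are illustrative only — the exported array can be spread and extended:

```js
// Hypothetical eslint.config.local.mjs — flat config is just an array of
// config objects, so the shared config can be spread and an override appended.
import base from "./eslint.config.mjs";

export default [
  ...base,
  {
    files: ["test/**/*.ts"], // illustrative glob
    rules: {
      "no-console": "warn", // illustrative override
    },
  },
];
```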

package.json

Lines changed: 12 additions & 5 deletions
@@ -3,6 +3,7 @@
   "version": "0.10.2",
   "description": "Lightweight library for making AI API calls with streaming support",
   "main": "dist/index.js",
+  "type": "module",
   "browser": "dist/index.js",
   "types": "dist/index.d.ts",
   "files": [
@@ -18,13 +19,14 @@
   },
   "scripts": {
     "build": "tsc",
-    "test": "jest --testMatch=\"**/*unit*.test.ts\"",
-    "test:integration": "jest simple.integration",
+    "lint": "eslint .",
+    "test": "vitest --run",
+    "test:integration": "vitest simple.integration",
     "test:all": "pnpm test && pnpm test:integration",
     "prepublishOnly": "npm run build",
     "typecheck": "tsc --noEmit",
     "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"",
-    "coverage": "jest --coverage",
+    "coverage": "vitest --coverage",
     "check": "npm run typecheck && npm run format && npm run test && npm run build"
   },
   "keywords": [
@@ -39,15 +41,20 @@
   "author": "",
   "license": "MIT or Apache-2.0",
   "devDependencies": {
+    "@eslint/js": "^9.31.0",
     "@types/jest": "^29.5.3",
     "@types/node": "^20.4.2",
     "@types/node-fetch": "^2.6.12",
-    "dotenv": "^16.4.7",
+    "eslint": "^9.31.0",
+    "eslint-plugin-import": "^2.32.0",
     "jest": "^29.6.1",
     "node-fetch": "^3.3.2",
     "prettier": "^3.5.3",
     "ts-jest": "^29.1.1",
-    "typescript": "^5.1.6"
+    "typescript": "^5.1.6",
+    "typescript-eslint": "^8.37.0",
+    "vitest": "^3.2.4",
+    "zx": "^8.7.1"
   },
   "engines": {
     "node": ">=14.0.0"

src/api-core.ts

Lines changed: 24 additions & 15 deletions
@@ -9,14 +9,17 @@ import {
   Schema,
   StreamResponse,
   ThenableStreamResponse,
-} from "./types";
-import { globalDebug } from "./key-management";
-import { callAINonStreaming } from "./non-streaming";
-import { callAIStreaming } from "./streaming";
+  isToolUseType,
+  isToolUseResponse,
+  isOpenAIArray,
+} from "./types.js";
+import { globalDebug } from "./key-management.js";
+import { callAINonStreaming } from "./non-streaming.js";
+import { callAIStreaming } from "./streaming.js";
+import { PACKAGE_VERSION } from "./version.js";
+import { callAiEnv } from "./utils.js";
 
 // Import package version for debugging
-// eslint-disable-next-line @typescript-eslint/no-var-requires
-const PACKAGE_VERSION = require("../package.json").version;
 
 /**
  * Main API interface function for making AI API calls
@@ -38,12 +41,15 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {})
   let schemaStrategy: SchemaStrategy = {
     strategy: "none" as const,
     model: options.model || "openai/gpt-3.5-turbo",
-    prepareRequest: () => ({}),
-    processResponse: (response: any) => {
+    prepareRequest: () => (undefined),
+    processResponse: (response) => {
       // If response is an object, stringify it to match expected test output
       if (response && typeof response === "object") {
         return JSON.stringify(response);
       }
+      if (typeof response !== "string") {
+        throw new Error(`Unexpected response type: ${typeof response}`);
+      }
       return response;
     },
     shouldForceStream: false,
@@ -61,11 +67,16 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {})
     shouldForceStream: false,
     prepareRequest: (schema) => {
       // Parse the schema to extract the function definition
-      let toolDef: any = {};
+      let toolDef: {
+        name?: string;
+        description?: string;
+        parameters?: unknown;
+      } = {};
 
       if (typeof schema === "string") {
         try {
           toolDef = JSON.parse(schema);
+          // eslint-disable-next-line @typescript-eslint/no-unused-vars
         } catch (e) {
           // If it's not valid JSON, we'll use it as a plain description
           toolDef = { description: schema };
@@ -104,17 +115,17 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {})
       }
 
       // Handle direct tool_use format
-      if (response && response.type === "tool_use") {
+      if (isToolUseType(response)) {
         return response.input || "{}";
       }
 
       // Handle object with tool_use property
-      if (response && response.tool_use) {
+      if (isToolUseResponse(response)) {
         return response.tool_use.input || "{}";
       }
 
       // Handle array of tool calls (OpenAI format)
-      if (Array.isArray(response)) {
+      if (isOpenAIArray(response)) {
         if (
           response.length > 0 &&
           response[0].function &&
@@ -308,9 +319,7 @@ function prepareRequestParams(
   options: CallAIOptions = {},
 ) {
   // Get API key from options or window.CALLAI_API_KEY (exactly matching original)
-  const apiKey =
-    options.apiKey ||
-    (typeof window !== "undefined" ? (window as any).CALLAI_API_KEY : null);
+  const apiKey = options.apiKey || callAiEnv.CALLAI_API_KEY
 
   // Validate API key with original error message
   if (!apiKey) {
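The hunks above replace ad-hoc shape checks with `isToolUseType`, `isToolUseResponse`, and `isOpenAIArray` guards imported from `./types.js`. The diff shows only their call sites, so the following is a sketch of what the guards might look like, inferred from how the results are used (`response.input`, `response.tool_use.input`, `response[0].function`); the exact shapes are assumptions:

```ts
// Hypothetical guards in src/types.ts — field shapes inferred from call sites.
interface ToolUseBlock {
  type: "tool_use";
  input?: string;
}

export function isToolUseType(value: unknown): value is ToolUseBlock {
  return (
    typeof value === "object" &&
    value !== null &&
    (value as { type?: unknown }).type === "tool_use"
  );
}

export function isToolUseResponse(
  value: unknown,
): value is { tool_use: ToolUseBlock } {
  return typeof value === "object" && value !== null && "tool_use" in value;
}

export function isOpenAIArray(
  value: unknown,
): value is Array<{ function?: { arguments?: string } }> {
  return Array.isArray(value);
}
```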

src/api.ts

Lines changed: 51 additions & 47 deletions
@@ -2,19 +2,22 @@
  * Core API implementation for call-ai
  */
 import {
+  CallAIError,
   CallAIOptions,
   Message,
   ResponseMeta,
   SchemaStrategy,
   StreamResponse,
-} from "./types";
-import { chooseSchemaStrategy } from "./strategies";
-import { responseMetadata, boxString, getMeta } from "./response-metadata";
-import { keyStore, globalDebug } from "./key-management";
-import { handleApiError, checkForInvalidModelError } from "./error-handling";
-import { createBackwardCompatStreamingProxy } from "./api-core";
-import { extractContent, extractClaudeResponse } from "./non-streaming";
-import { createStreamingGenerator } from "./streaming";
+} from "./types.js";
+import { chooseSchemaStrategy } from "./strategies/index.js";
+import { responseMetadata, boxString } from "./response-metadata.js";
+import { keyStore, globalDebug } from "./key-management.js";
+import { handleApiError, checkForInvalidModelError } from "./error-handling.js";
+import { createBackwardCompatStreamingProxy } from "./api-core.js";
+import { extractContent, extractClaudeResponse } from "./non-streaming.js";
+import { createStreamingGenerator } from "./streaming.js";
+import { PACKAGE_VERSION } from "./version.js";
+import { callAiEnv } from "./utils.js";
 
 // Key management is now imported from ./key-management
 
@@ -33,11 +36,10 @@ import { createStreamingGenerator } from "./streaming";
 
 // boxString and getMeta functions are now imported from ./response-metadata
 // Re-export getMeta to maintain backward compatibility
-export { getMeta };
+// export { getMeta };
 
 // Import package version for debugging
-// eslint-disable-next-line @typescript-eslint/no-var-requires
-const PACKAGE_VERSION = require("../package.json").version;
+
 
 // Default fallback model when the primary model fails or is unavailable
 const FALLBACK_MODEL = "openrouter/auto";
@@ -252,13 +254,13 @@ export function callAi(
       }
 
       // Create error with standard format
-      const error = new Error(errorMessage);
-
-      // Add useful metadata
-      (error as any).status = response.status;
-      (error as any).statusText = response.statusText;
-      (error as any).details = errorJson;
-      (error as any).contentType = contentType;
+      const error = new CallAIError({
+        message: errorMessage,
+        status: response.status,
+        statusText: response.statusText,
+        details: errorJson,
+        contentType,
+      })
       throw error;
     } catch (jsonError) {
       // If JSON parsing fails, extract a useful message from the raw error body
@@ -295,11 +297,13 @@ export function callAi(
       );
     }
 
-    const error = new Error(errorMessage);
-    (error as any).status = response.status;
-    (error as any).statusText = response.statusText;
-    (error as any).details = errorBody;
-    (error as any).contentType = contentType;
+    const error = new CallAIError({
+      message: errorMessage,
+      status: response.status,
+      statusText: response.statusText,
+      details: errorBody,
+      contentType,
+    })
     throw error;
   }
 } catch (responseError) {
@@ -309,12 +313,14 @@ export function callAi(
     }
 
     // Fallback error
-    const error = new Error(
-      `API returned ${response.status}: ${response.statusText}`,
-    );
-    (error as any).status = response.status;
-    (error as any).statusText = response.statusText;
-    (error as any).contentType = contentType;
+    const error = new CallAIError(
+      {
+        message: `API returned ${response.status}: ${response.statusText}`,
+        status: response.status,
+        statusText: response.statusText,
+        details: undefined,
+        contentType,
+      })
     throw error;
   }
 }
@@ -328,7 +334,7 @@ export function callAi(
   })();
 
   // For backward compatibility with v0.6.x where users didn't await the result
-  if (process.env.NODE_ENV !== "production") {
+  if (callAiEnv.NODE_ENV !== "production") {
     if (options.debug) {
       console.warn(
         `[callAi:${PACKAGE_VERSION}] No await found - using legacy streaming pattern. This will be removed in a future version and may cause issues with certain models.`,
@@ -337,7 +343,7 @@ export function callAi(
   }
 
   // Create a proxy object that acts both as a Promise and an AsyncGenerator for backward compatibility
-  // @ts-ignore - We're deliberately implementing a proxy with dual behavior
+  //... @ts-ignore - We're deliberately implementing a proxy with dual behavior
   return createBackwardCompatStreamingProxy(streamPromise);
 }
 
@@ -414,6 +420,7 @@ async function bufferStreamingResults(
 
 // checkForInvalidModelError is imported from error-handling.ts
 
+
 /**
  * Prepare request parameters common to both streaming and non-streaming calls
  */
@@ -428,10 +435,9 @@
   schemaStrategy: SchemaStrategy;
 } {
   // First try to get the API key from options or window globals
-  let apiKey =
+  const apiKey =
     options.apiKey ||
-    keyStore.current || // Try keyStore first in case it was refreshed in a previous call
-    (typeof window !== "undefined" ? (window as any).CALLAI_API_KEY : null);
+    keyStore.current || callAiEnv.CALLAI_API_KEY() // Try keyStore first in case it was refreshed in a previous call
   const schema = options.schema || null;
 
   // If no API key exists, we won't throw immediately. We'll continue and let handleApiError
@@ -442,12 +448,7 @@
   const model = schemaStrategy.model;
 
   // Get custom chat API origin if set
-  const customChatOrigin =
-    options.chatUrl ||
-    (typeof window !== "undefined" ? (window as any).CALLAI_CHAT_URL : null) ||
-    (typeof process !== "undefined" && process.env
-      ? process.env.CALLAI_CHAT_URL
-      : null);
+  const customChatOrigin = options.chatUrl || callAiEnv.CALLAI_CHAT_URL;
 
   // Use custom origin or default OpenRouter URL
   const endpoint =
@@ -462,14 +463,14 @@
     : [{ role: "user", content: prompt }];
 
   // Common parameters for both streaming and non-streaming
-  const requestParams: any = {
+  const requestParams: CallAIOptions = {
     model,
     messages,
     stream: options.stream !== undefined ? options.stream : false,
   };
 
   // Only include temperature if explicitly set
-  if (options.temperature !== undefined) {
+  if (options.temperature) {
     requestParams.temperature = options.temperature;
   }
 
@@ -555,7 +556,7 @@
 async function callAINonStreaming(
   prompt: string | Message[],
   options: CallAIOptions = {},
-  isRetry: boolean = false,
+  isRetry = false,
 ): Promise<string> {
   try {
     // Start timing for metadata
@@ -593,10 +594,13 @@
     }
 
     // Create a proper error object with the status code preserved
-    const error: any = new Error(`HTTP error! Status: ${response.status}`);
-    // Add status code as a property of the error object
-    error.status = response.status;
-    error.statusCode = response.status; // Add statusCode for compatibility with different error patterns
+    const error = new CallAIError({
+      message: `HTTP error! Status: ${response.status}`,
+      status: response.status,
+      statusText: response.statusText,
+      details: undefined,
+      contentType: "text/plain",
+    })
     throw error;
   }
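Error handling in this file now goes through a `CallAIError` class (imported from `./types.js`) instead of patching properties onto `Error` via `as any`, and environment lookups go through `callAiEnv` from `./utils.js`. Neither definition appears in this diff, and the diff reads `callAiEnv.CALLAI_API_KEY` as a property in api-core.ts but calls `callAiEnv.CALLAI_API_KEY()` here (a wip inconsistency), so the following is only a sketch of plausible shapes; field names and lookup order are assumptions.

```ts
// Hypothetical addition to src/types.ts: a typed error carrying HTTP metadata,
// matching the fields passed at the call sites above.
export class CallAIError extends Error {
  status: number;
  statusText: string;
  details: unknown;
  contentType: string;

  constructor(opts: {
    message: string;
    status: number;
    statusText: string;
    details: unknown;
    contentType: string;
  }) {
    super(opts.message);
    this.name = "CallAIError";
    this.status = opts.status;
    this.statusText = opts.statusText;
    this.details = opts.details;
    this.contentType = opts.contentType;
  }
}

// Hypothetical src/utils.ts shim: prefer browser globals, fall back to process.env.
type EnvGlobals = { CALLAI_API_KEY?: string; CALLAI_CHAT_URL?: string };

const browserGlobals = (): EnvGlobals =>
  typeof window !== "undefined" ? (window as unknown as EnvGlobals) : {};

export const callAiEnv = {
  get CALLAI_API_KEY(): string | undefined {
    return (
      browserGlobals().CALLAI_API_KEY ??
      (typeof process !== "undefined" ? process.env.CALLAI_API_KEY : undefined)
    );
  },
  get CALLAI_CHAT_URL(): string | undefined {
    return (
      browserGlobals().CALLAI_CHAT_URL ??
      (typeof process !== "undefined" ? process.env.CALLAI_CHAT_URL : undefined)
    );
  },
  get NODE_ENV(): string | undefined {
    return typeof process !== "undefined" ? process.env.NODE_ENV : undefined;
  },
};
```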

0 commit comments
