
Commit 25dab32

chore: now the compiler is happy
1 parent 2df325e

32 files changed: +517 −333 lines

src/api-core.ts

Lines changed: 36 additions & 23 deletions
@@ -12,6 +12,9 @@ import {
   isToolUseType,
   isToolUseResponse,
   isOpenAIArray,
+  OpenAIFunctionCall,
+  RequestSchema,
+  CallAIError,
 } from "./types.js";
 import { globalDebug } from "./key-management.js";
 import { callAINonStreaming } from "./non-streaming.js";
@@ -41,7 +44,9 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) {
   let schemaStrategy: SchemaStrategy = {
     strategy: "none" as const,
     model: options.model || "openai/gpt-3.5-turbo",
-    prepareRequest: () => (undefined),
+    prepareRequest: () => {
+      throw new Error("Schema strategy not implemented");
+    },
     processResponse: (response) => {
       // If response is an object, stringify it to match expected test output
       if (response && typeof response === "object") {
@@ -67,16 +72,12 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) {
       shouldForceStream: false,
       prepareRequest: (schema) => {
         // Parse the schema to extract the function definition
-        let toolDef: {
-          name?: string;
-          description?: string;
-          parameters?: unknown;
-        } = {};
+        let toolDef: Partial<RequestSchema> = {};

         if (typeof schema === "string") {
           try {
             toolDef = JSON.parse(schema);
-          // eslint-disable-next-line @typescript-eslint/no-unused-vars
+            // eslint-disable-next-line @typescript-eslint/no-unused-vars
           } catch (e) {
             // If it's not valid JSON, we'll use it as a plain description
             toolDef = { description: schema };
@@ -97,7 +98,7 @@
             properties: {},
           },
         },
-      },
+      } satisfies OpenAIFunctionCall,
     ];

     return {
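
The `} satisfies OpenAIFunctionCall` in the hunk above is the interesting change: unlike a type annotation or an `as` cast, `satisfies` checks the tool literal against the type without widening its inferred type. A minimal illustration of the difference (ToolShape and its field values are invented for this example; the real OpenAIFunctionCall type lives in src/types.ts, which this commit does not show):

// `satisfies` validates the object against ToolShape while preserving
// the literal's narrower inferred type.
type ToolShape = { type: string; function: { name: string } };

const checked = {
  type: "function",
  function: { name: "extract" },
} satisfies ToolShape;

// checked.type is still the literal type "function", not string:
const kind: "function" = checked.type; // compiles

// With a plain annotation, the property is widened to the declared type:
const annotated: ToolShape = { type: "function", function: { name: "extract" } };
// const k: "function" = annotated.type; // would NOT compile: type is string

An `as OpenAIFunctionCall` cast in the same spot would have silenced the compiler even for a malformed object; `satisfies` keeps the full structural check.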
@@ -149,7 +150,7 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) {
       shouldForceStream: false,
       prepareRequest: (schema) => {
         // Create a properly formatted JSON schema request
-        const schemaObj = (schema as Schema) || {};
+        const schemaObj: Partial<Schema> = schema || {};
         return {
           response_format: {
             type: "json_schema",
@@ -200,7 +201,6 @@ function callAi(prompt: string | Message[], options: CallAIOptions = {}) {
     })();

     // Create a proxy object that acts both as a Promise and an AsyncGenerator for backward compatibility
-    // @ts-ignore - We're deliberately implementing a proxy with dual behavior
     return createBackwardCompatStreamingProxy(streamPromise);
   } else {
     if (debug) {
@@ -239,17 +239,21 @@ async function bufferStreamingResults(
   } catch (error) {
     // If we already collected some content, attach it to the error
     if (error instanceof Error) {
-      const enhancedError = new Error(
-        `${error.message} (Partial content: ${result.slice(0, 100)}...)`,
-      );
-      (enhancedError as any).partialContent = result;
-      (enhancedError as any).originalError = error;
+      const enhancedError = new CallAIError({
+        message: `${error.message} (Partial content: ${result.slice(0, 100)}...)`,
+        status: 511,
+        partialContent: result,
+        originalError: error,
+      });
       throw enhancedError;
     } else {
       // For non-Error objects, create an Error with info
-      const newError = new Error(`Streaming error: ${String(error)}`);
-      (newError as any).partialContent = result;
-      (newError as any).originalError = error;
+      const newError = new CallAIError({
+        message: `Streaming error: ${String(error)}`,
+        status: 511,
+        partialContent: result,
+        originalError: error as Error,
+      });
       throw newError;
     }
   }
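
The two bufferStreamingResults hunks replace the old pattern of bolting untyped properties onto a plain Error (the `(enhancedError as any).partialContent = ...` lines) with a structured CallAIError that takes those fields in its constructor. The real class is defined in src/types.ts, which this commit does not show; a minimal sketch consistent with the call sites in this diff could look like the following (the field names come from the diff, everything else is an assumption):

// Hypothetical reconstruction of CallAIError, matching the call sites above.
interface CallAIErrorParams {
  message: string;
  status?: number;
  statusText?: string;
  details?: unknown;
  contentType?: string;
  partialContent?: string;
  originalError?: Error;
  refreshError?: unknown;
  errorType?: string;
}

class CallAIError extends Error {
  readonly status: number;
  readonly partialContent?: string;
  readonly originalError?: Error;

  constructor(params: CallAIErrorParams) {
    super(params.message);
    this.name = "CallAIError";
    this.status = params.status ?? 500; // default is a guess
    this.partialContent = params.partialContent;
    this.originalError = params.originalError;
  }
}

The payoff is at the catch site: consumers can narrow with `error instanceof CallAIError` and read `error.partialContent` with full typing instead of probing `(error as any).partialContent`.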
@@ -263,7 +267,7 @@ function createBackwardCompatStreamingProxy(
   promise: Promise<StreamResponse>,
 ): ThenableStreamResponse {
   // Create a proxy that forwards methods to the Promise or AsyncGenerator as appropriate
-  return new Proxy({} as any, {
+  return new Proxy({} as ThenableStreamResponse, {
     get(_target, prop) {
       // First check if it's an AsyncGenerator method (needed for for-await)
       if (
@@ -277,7 +281,7 @@
         return function () {
           return {
             // Implement async iterator that gets the generator first
-            async next(value?: unknown) {
+            async next(value: unknown) {
               try {
                 const generator = await promise;
                 return generator.next(value);
@@ -291,9 +295,18 @@
       }

       // Methods like next, throw, return
-      return async function (value?: unknown) {
+      return async function (value: unknown) {
         const generator = await promise;
-        return (generator as any)[prop](value);
+        switch (prop) {
+          case "next":
+            return generator.next(value);
+          case "throw":
+            return generator.throw(value);
+          case "return":
+            return generator.return(value as string);
+          default:
+            throw new Error(`Unknown method: ${String(prop)}`);
+        }
       };
     }

@@ -319,7 +332,7 @@ function prepareRequestParams(
   options: CallAIOptions = {},
 ) {
   // Get API key from options or window.CALLAI_API_KEY (exactly matching original)
-  const apiKey = options.apiKey || callAiEnv.CALLAI_API_KEY
+  const apiKey = options.apiKey || callAiEnv.CALLAI_API_KEY;

   // Validate API key with original error message
   if (!apiKey) {
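
The proxy hunks are what retire the old `@ts-ignore` and `as any`: the proxy target is now typed as ThenableStreamResponse, and the dynamic `(generator as any)[prop](value)` dispatch becomes an explicit switch, which type-checks because `next`, `throw`, and `return` have different signatures on AsyncGenerator. The dual Promise/AsyncGenerator shape exists for backward compatibility, so both consumption styles work against the same return value. A usage sketch (the `stream` option name and string chunks are inferred from this file, not confirmed by the diff):

import { callAi } from "./api.js";

// Style 1: legacy consumers iterate the proxy directly with for-await.
const stream = callAi("Describe the weather", { stream: true });
for await (const chunk of stream) {
  process.stdout.write(String(chunk)); // chunks assumed to be text deltas
}

// Style 2: newer consumers await the proxy as a Promise of the generator.
const generator = await callAi("Describe the weather", { stream: true });
const first = await generator.next();
console.log(first.value);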

src/api.ts

Lines changed: 25 additions & 23 deletions
@@ -3,6 +3,7 @@
  */
 import {
   CallAIError,
+  CallAIErrorParams,
   CallAIOptions,
   Message,
   ResponseMeta,
@@ -39,7 +40,6 @@ import { callAiEnv } from "./utils.js";
 // export { getMeta };

 // Import package version for debugging
-

 // Default fallback model when the primary model fails or is unavailable
 const FALLBACK_MODEL = "openrouter/auto";
@@ -260,7 +260,7 @@ export function callAi(
           statusText: response.statusText,
           details: errorJson,
           contentType,
-        })
+        });
         throw error;
       } catch (jsonError) {
         // If JSON parsing fails, extract a useful message from the raw error body
@@ -303,7 +303,7 @@ export function callAi(
           statusText: response.statusText,
           details: errorBody,
           contentType,
-        })
+        });
         throw error;
       }
     } catch (responseError) {
@@ -313,14 +313,13 @@ export function callAi(
     }

     // Fallback error
-    const error = new CallAIError(
-      {
-        message: `API returned ${response.status}: ${response.statusText}`,
-        status: response.status,
-        statusText: response.statusText,
-        details: undefined,
-        contentType,
-      })
+    const error = new CallAIError({
+      message: `API returned ${response.status}: ${response.statusText}`,
+      status: response.status,
+      statusText: response.statusText,
+      details: undefined,
+      contentType,
+    });
     throw error;
   }
 }
@@ -391,13 +390,18 @@ async function bufferStreamingResults(
     }
   } catch (error) {
     // Handle errors with standard API error handling
-    await handleApiError(error, "Buffered streaming", options.debug, {
-      apiKey: options.apiKey,
-      endpoint: options.endpoint,
-      skipRefresh: options.skipRefresh,
-      refreshToken: options.refreshToken,
-      updateRefreshToken: options.updateRefreshToken,
-    });
+    await handleApiError(
+      error as CallAIErrorParams,
+      "Buffered streaming",
+      options.debug,
+      {
+        apiKey: options.apiKey,
+        endpoint: options.endpoint,
+        skipRefresh: options.skipRefresh,
+        refreshToken: options.refreshToken,
+        updateRefreshToken: options.updateRefreshToken,
+      },
+    );
     // If we get here, key was refreshed successfully, retry the operation with the new key
     // Retry with the refreshed key
     return bufferStreamingResults(prompt, {
@@ -420,7 +424,6 @@ async function bufferStreamingResults(

 // checkForInvalidModelError is imported from error-handling.ts

-
 /**
  * Prepare request parameters common to both streaming and non-streaming calls
  */
@@ -436,8 +439,7 @@ function prepareRequestParams(
 } {
   // First try to get the API key from options or window globals
   const apiKey =
-    options.apiKey ||
-    keyStore.current || callAiEnv.CALLAI_API_KEY() // Try keyStore first in case it was refreshed in a previous call
+    options.apiKey || keyStore.current || callAiEnv.CALLAI_API_KEY(); // Try keyStore first in case it was refreshed in a previous call
   const schema = options.schema || null;

   // If no API key exists, we won't throw immediately. We'll continue and let handleApiError
@@ -600,7 +602,7 @@ async function callAINonStreaming(
       statusText: response.statusText,
       details: undefined,
       contentType: "text/plain",
-    })
+    });
     throw error;
   }

@@ -612,7 +614,7 @@ async function callAINonStreaming(
     result = await extractClaudeResponse(response);
   } catch (error) {
     handleApiError(
-      error,
+      error as CallAIErrorParams,
       "Claude API response processing failed",
       options.debug,
     );
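
The reshaped handleApiError call in bufferStreamingResults highlights an implicit contract worth naming: the helper either throws (unrecoverable error) or returns normally after refreshing the API key, which is why the code after the await can simply call bufferStreamingResults again. A generic restatement of that control flow, not code from this commit:

// Sketch of the retry-after-refresh pattern. `onError` must throw for
// unrecoverable failures and return only after a successful key refresh,
// otherwise this would retry blindly.
async function withKeyRefreshRetry<T>(
  attempt: () => Promise<T>,
  onError: (error: unknown) => Promise<void>,
): Promise<T> {
  try {
    return await attempt();
  } catch (error) {
    await onError(error); // throws unless the key was refreshed
    return attempt(); // second try with the refreshed key
  }
}

In the real code the second attempt is the recursive `return bufferStreamingResults(prompt, { ... })` visible above.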

src/error-handling.ts

Lines changed: 20 additions & 14 deletions
@@ -15,7 +15,7 @@ import { CallAIError, CallAIErrorParams } from "./types.js";
 // @param debug Whether to log debug information
 // @param options Options for error handling including key refresh control
 async function handleApiError(
-  error: CallAIErrorParams,
+  ierror: unknown,
   context: string,
   debug: boolean = globalDebug,
   options: {
@@ -26,6 +26,8 @@ async function handleApiError(
     updateRefreshToken?: (currentToken: string) => Promise<string>;
   } = {},
 ): Promise<void> {
+  const error = ierror as CallAIErrorParams;
+
   // Extract error details
   const errorMessage = error?.message || String(error);
   const status =
@@ -171,23 +173,25 @@
       );
     }
     // Create a more detailed error from the original one
-    const detailedError = new CallAIError( {
-      message: `${errorMessage} (Key refresh failed: ${refreshError instanceof Error ? refreshError.message : String(refreshError)})`,
-      originalError: error,
-      refreshError: refreshError,
-      status: status || 401
-    })
+    const detailedError = new CallAIError({
+      message: `${errorMessage} (Key refresh failed: ${refreshError instanceof Error ? refreshError.message : String(refreshError)})`,
+      originalError: error,
+      refreshError,
+      status: status || 401,
+      contentType: "text/plain",
+    });

     throw detailedError;
   }
 }

 // For non-key errors, create a detailed error object
-  const detailedError = new Error(`${context}: ${errorMessage}`);
-  (detailedError as any).originalError = error;
-  (detailedError as any).status = status || 500;
-  (detailedError as any).errorType = error?.name || "Error";
-
+  const detailedError = new CallAIError({
+    message: `${context}: ${errorMessage}`,
+    originalError: error,
+    status: status || 500,
+    errorType: error.name || "Error",
+  });
   throw detailedError;
 }

@@ -196,7 +200,7 @@ async function checkForInvalidModelError(
   response: Response,
   model: string,
   debug: boolean = globalDebug,
-): Promise<{ isInvalidModel: boolean; errorData?: any }> {
+): Promise<{ isInvalidModel: boolean; errorData?: unknown }> {
   // Only check 4xx errors (which could indicate invalid model)
   if (response.status < 400 || response.status >= 500) {
     return { isInvalidModel: false };
@@ -209,12 +213,14 @@
   let errorData;
   try {
     errorData = await responseClone.json();
+    // eslint-disable-next-line @typescript-eslint/no-unused-vars
   } catch (e) {
     // If it's not JSON, get the text
     try {
       const text = await responseClone.text();
       errorData = { error: text };
-    } catch (textError) {
+      // eslint-disable-next-line @typescript-eslint/no-unused-vars
+    } catch (e) {
       errorData = { error: `Error ${response.status}: ${response.statusText}` };
     }
   }
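
Widening handleApiError's first parameter to `unknown` (renamed `ierror`) matches what a catch clause actually provides under TypeScript's useUnknownInCatchVariables, and moves the cast to a single spot at the top of the function; the `error as CallAIErrorParams` casts at the api.ts call sites above are technically unnecessary with this wider signature. The one blind assertion that remains is pragmatic but unchecked; a slightly safer variant would narrow first (illustrative only, not from the commit, with a local stand-in for the real CallAIErrorParams in src/types.ts):

// Local stand-in; the real CallAIErrorParams is defined in src/types.ts.
interface ErrorParamsLike {
  message: string;
  status?: number;
  name?: string;
}

// One narrowing step at the boundary instead of a blind cast.
function toErrorParams(ierror: unknown): ErrorParamsLike {
  if (ierror instanceof Error) {
    return ierror; // Error structurally satisfies { message: string; name?: string }
  }
  return { message: String(ierror) };
}

Either way, downstream code reads error.message, error.status, and error.name through a typed shape instead of `any`.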

src/image.ts

Lines changed: 1 addition & 2 deletions
@@ -7,7 +7,6 @@ import { callAiEnv } from "./utils.js";
 import { PACKAGE_VERSION } from "./version.js";

 // Import package version for debugging (same as main API)
-

 /**
  * Generate images using a custom API that mimics OpenAI's image generation capabilities
@@ -34,7 +33,7 @@ export async function imageGen(
   }

   // Get custom origin if set
-  const customOrigin = options.imgUrl || callAiEnv.CALLAI_IMG_URL
+  const customOrigin = options.imgUrl || callAiEnv.CALLAI_IMG_URL;

   try {
     // Handle image generation

src/index.ts

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 export * from "./types.js";

 // Export API functions
-export { callAi, } from "./api.js";
+export { callAi } from "./api.js";
 // Backward compatibility for callAI (uppercase AI)
 export { callAi as callAI } from "./api.js";
