Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
5752065
Make Params strictly typed
narengogi Jul 6, 2024
9c5d95f
Update types for openai create chat completion object
narengogi Jul 15, 2024
f9c551e
type fixes for anthropic
narengogi Jul 15, 2024
94b699b
Separate out openai request and response types
narengogi Jul 16, 2024
f8beb13
Fix types for Gemini
narengogi Jul 16, 2024
95ba9f4
Fix Types for Reka AI
narengogi Jul 16, 2024
ea78ee5
formatting
narengogi Jul 16, 2024
0dde4b5
revert system message should only be string change
narengogi Jul 19, 2024
6814fe6
revert changes that made system message string only as the older for…
narengogi Jul 19, 2024
ea23dc3
fix: convert anthropic stop_reason to finish_reason
May 14, 2024
33952d2
fix: groq finish reason with streaming
May 14, 2024
4b7ff2c
fix google stop reason
May 16, 2024
a72cf50
fix: anthropic non stream stop reason
May 27, 2024
80790e6
Merge branch 'fix/types' into unsync-anthropic-stop-reason
narengogi Jul 20, 2024
9214e19
Refactor Anthropic stop reason
narengogi Jul 20, 2024
b3f2310
Revert "Merge branch 'fix/types' into unsync-anthropic-stop-reason"
narengogi Jul 20, 2024
27614ec
Fix Finish Reason for Multiple providers
narengogi Jul 20, 2024
2864ca3
separate finish reason for chat complete and completions
narengogi Jul 23, 2024
78f1366
Refactor to make completion endpoints across providers transform to O…
narengogi Jul 23, 2024
2bcff70
Refactor existing chat complete finish reason transformer to transfor…
narengogi Jul 23, 2024
c1f481b
Fix Finish reason for chat complete of reka and workers ai
narengogi Jul 23, 2024
fa1751a
Transform more providers for chatComplete
narengogi Jul 23, 2024
502e07d
refactoring
narengogi Jul 24, 2024
bd03838
refactoring
narengogi Jul 24, 2024
2c4ec88
refactoring
narengogi Jul 24, 2024
6686010
refactoring
narengogi Jul 24, 2024
b301b27
refactoring
narengogi Jul 24, 2024
cdb1acd
Cohere finish reason transform
narengogi Jul 29, 2024
dfec628
Change type of finish reason from enum to enum | string to allow for …
narengogi Jul 29, 2024
3722731
Change type of finish reason from enum to enum | string to allow for …
narengogi Jul 29, 2024
4db18c2
Change type of finish reason from enum to enum | string to allow for …
narengogi Jul 29, 2024
d96d788
Change type of finish reason from enum to enum | string to allow for …
narengogi Jul 29, 2024
23cacb3
missed changes for stream response handlers
narengogi Jul 29, 2024
8e35290
formatting and refactoring
narengogi Jul 30, 2024
b73f7c4
Fix transformation of stop reason for a few more providers
narengogi Jul 30, 2024
1e29ea2
formatting
narengogi Jul 30, 2024
682ae5c
Merge remote-tracking branch 'upstream/main' into unsync-anthropic-st…
narengogi Aug 14, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 4 additions & 15 deletions src/providers/ai21/chatComplete.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,9 @@ import {
generateErrorResponse,
generateInvalidProviderResponseError,
} from '../utils';
import { AI21ErrorResponse } from './complete';
import { AI21ErrorResponse } from './types';
import { AI21ChatCompleteResponse } from './types';
import { transformAI21ChatFinishReason } from './utils';

export const AI21ChatCompleteConfig: ProviderConfig = {
messages: [
Expand Down Expand Up @@ -97,19 +99,6 @@ export const AI21ChatCompleteConfig: ProviderConfig = {
},
};

interface AI21ChatCompleteResponse {
id: string;
outputs: {
text: string;
role: string;
finishReason: {
reason: string;
length: number | null;
sequence: string | null;
};
}[];
}

export const AI21ErrorResponseTransform: (
response: AI21ErrorResponse
) => ErrorResponse | undefined = (response) => {
Expand Down Expand Up @@ -148,7 +137,7 @@ export const AI21ChatCompleteResponseTransform: (
},
index: index,
logprobs: null,
finish_reason: o.finishReason?.reason,
finish_reason: transformAI21ChatFinishReason(o.finishReason?.reason),
})),
};
}
Expand Down
30 changes: 5 additions & 25 deletions src/providers/ai21/complete.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@ import { Params } from '../../types/requestBody';
import { CompletionResponse, ErrorResponse, ProviderConfig } from '../types';
import { generateInvalidProviderResponseError } from '../utils';
import { AI21ErrorResponseTransform } from './chatComplete';
import { AI21CompleteResponse, AI21ErrorResponse } from './types';
import { transformAI21CompletionFinishReason } from './utils';

export const AI21CompleteConfig: ProviderConfig = {
prompt: {
Expand Down Expand Up @@ -65,30 +67,6 @@ export const AI21CompleteConfig: ProviderConfig = {
},
};

interface AI21CompleteResponse {
id: string;
prompt: {
text: string;
tokens: Record<string, any>[];
};
completions: [
{
data: {
text: string;
tokens: Record<string, any>[];
};
finishReason: {
reason: string;
length: number;
};
},
];
}

export interface AI21ErrorResponse {
detail: string;
}

export const AI21CompleteResponseTransform: (
response: AI21CompleteResponse | AI21ErrorResponse,
responseStatus: number
Expand Down Expand Up @@ -116,7 +94,9 @@ export const AI21CompleteResponseTransform: (
text: completion.data.text,
index: index,
logprobs: null,
finish_reason: completion.finishReason?.reason,
finish_reason: transformAI21CompletionFinishReason(
completion.finishReason?.reason
),
})),
usage: {
prompt_tokens: inputTokens,
Expand Down
2 changes: 1 addition & 1 deletion src/providers/ai21/embed.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { ErrorResponse, ProviderConfig } from '../types';
import { EmbedParams, EmbedResponse } from '../../types/embedRequestBody';
import { AI21ErrorResponse } from './complete';
import { AI21ErrorResponse } from './types';
import { AI21 } from '../../globals';
import { generateInvalidProviderResponseError } from '../utils';
import { AI21ErrorResponseTransform } from './chatComplete';
Expand Down
39 changes: 39 additions & 0 deletions src/providers/ai21/types.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
/**
 * Finish-reason values returned by the AI21 API in
 * `finishReason.reason`. The API may also return values outside this
 * enum, which is why consumers type the field as
 * `AI21_FINISH_REASON | string`.
 */
export enum AI21_FINISH_REASON {
  stop = 'stop',
  length = 'length',
}
/**
 * Raw (untransformed) response body from the AI21 chat-completion
 * endpoint. Transformed to the OpenAI chat-completion shape by
 * `AI21ChatCompleteResponseTransform`.
 */
export interface AI21ChatCompleteResponse {
  id: string;
  /** One entry per generated output message. */
  outputs: {
    text: string;
    role: string;
    finishReason: {
      // Known values are in AI21_FINISH_REASON; string covers any
      // values the API adds that we don't model yet.
      reason: AI21_FINISH_REASON | string;
      length: number | null;
      sequence: string | null;
    };
  }[];
}
/**
 * Raw (untransformed) response body from the AI21 text-completion
 * endpoint. Transformed to the OpenAI completion shape by
 * `AI21CompleteResponseTransform`.
 */
export interface AI21CompleteResponse {
  id: string;
  /** Echo of the input prompt, including per-token details. */
  prompt: {
    text: string;
    // NOTE(review): token entries are untyped upstream; tighten if the
    // AI21 token schema is ever needed by callers.
    tokens: Record<string, any>[];
  };
  completions: [
    {
      data: {
        text: string;
        tokens: Record<string, any>[];
      };
      finishReason: {
        // Known values are in AI21_FINISH_REASON; string covers
        // unmodeled values.
        reason: AI21_FINISH_REASON | string;
        length: number;
      };
    },
  ];
}

/** Error body returned by AI21 on non-200 responses. */
export interface AI21ErrorResponse {
  /** Human-readable error description. */
  detail: string;
}
31 changes: 31 additions & 0 deletions src/providers/ai21/utils.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import {
OPEN_AI_CHAT_COMPLETION_FINISH_REASON,
OPEN_AI_COMPLETION_FINISH_REASON,
} from '../types';
import { AI21_FINISH_REASON } from './types';

/**
 * Maps an AI21 chat `finishReason.reason` to the OpenAI
 * chat-completion `finish_reason` vocabulary.
 *
 * @param reason - Raw reason from the AI21 response; may be a value
 *   outside the known enum.
 * @returns The equivalent OpenAI finish reason; unrecognized values
 *   fall back to `stop`.
 */
export const transformAI21ChatFinishReason = (
  reason: AI21_FINISH_REASON | string
): OPEN_AI_CHAT_COMPLETION_FINISH_REASON => {
  if (reason === AI21_FINISH_REASON.length) {
    return OPEN_AI_CHAT_COMPLETION_FINISH_REASON.length;
  }
  // 'stop' and any unknown value both normalize to 'stop'.
  return OPEN_AI_CHAT_COMPLETION_FINISH_REASON.stop;
};

/**
 * Maps an AI21 completion `finishReason.reason` to the OpenAI
 * completion `finish_reason` vocabulary.
 *
 * @param reason - Raw reason from the AI21 response; may be a value
 *   outside the known enum.
 * @returns The equivalent OpenAI finish reason; unrecognized values
 *   fall back to `stop`.
 */
export const transformAI21CompletionFinishReason = (
  reason: AI21_FINISH_REASON | string
): OPEN_AI_COMPLETION_FINISH_REASON => {
  if (reason === AI21_FINISH_REASON.length) {
    return OPEN_AI_COMPLETION_FINISH_REASON.length;
  }
  // 'stop' and any unknown value both normalize to 'stop'.
  return OPEN_AI_COMPLETION_FINISH_REASON.stop;
};
23 changes: 16 additions & 7 deletions src/providers/anthropic/chatComplete.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,11 @@ import {
generateErrorResponse,
generateInvalidProviderResponseError,
} from '../utils';
import { ANTHROPIC_STOP_REASON } from './types';
import {
transformAnthropicChatStopReason,
transformAnthropicChatStreamChunkStopReason,
} from './utils';

// TODO: this configuration does not enforce the maximum token limit for the input parameter. If you want to enforce this, you might need to add a custom validation function or a max property to the ParameterConfig interface, and then use it in the input configuration. However, this might be complex because the token count is not a simple length check, but depends on the specific tokenization method used by the model.

Expand Down Expand Up @@ -290,7 +295,7 @@ export interface AnthropicChatCompleteResponse {
type: string;
role: string;
content: AnthropicContentItem[];
stop_reason: string;
stop_reason: ANTHROPIC_STOP_REASON | string;
model: string;
stop_sequence: null | string;
usage: {
Expand All @@ -306,7 +311,7 @@ export interface AnthropicChatCompleteStreamResponse {
type: string;
text: string;
partial_json?: string;
stop_reason?: string;
stop_reason: ANTHROPIC_STOP_REASON | string;
};
content_block?: {
type: string;
Expand Down Expand Up @@ -351,10 +356,10 @@ export const AnthropicChatCompleteResponseTransform: (
responseStatus: number
) => ChatCompletionResponse | ErrorResponse = (response, responseStatus) => {
if (responseStatus !== 200) {
const errorResposne = AnthropicErrorResponseTransform(
const errorResponse = AnthropicErrorResponseTransform(
response as AnthropicErrorResponse
);
if (errorResposne) return errorResposne;
if (errorResponse) return errorResponse;
}

if ('content' in response) {
Expand Down Expand Up @@ -394,7 +399,7 @@ export const AnthropicChatCompleteResponseTransform: (
},
index: 0,
logprobs: null,
finish_reason: response.stop_reason,
finish_reason: transformAnthropicChatStopReason(response.stop_reason),
},
],
usage: {
Expand Down Expand Up @@ -479,7 +484,9 @@ export const AnthropicChatCompleteStreamChunkTransform: (
{
index: 0,
delta: {},
finish_reason: parsedChunk.delta?.stop_reason,
finish_reason: transformAnthropicChatStreamChunkStopReason(
parsedChunk.delta.stop_reason
),
},
],
usage: {
Expand Down Expand Up @@ -534,7 +541,9 @@ export const AnthropicChatCompleteStreamChunkTransform: (
},
index: 0,
logprobs: null,
finish_reason: parsedChunk.delta?.stop_reason ?? null,
finish_reason: transformAnthropicChatStreamChunkStopReason(
parsedChunk.delta?.stop_reason
),
},
],
})}` + '\n\n'
Expand Down
19 changes: 14 additions & 5 deletions src/providers/anthropic/complete.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,11 @@ import {
AnthropicErrorResponse,
AnthropicErrorResponseTransform,
} from './chatComplete';
import { ANTHROPIC_STOP_REASON } from './types';
import {
transformAnthropicCompletionFinishReason,
transformAnthropicCompletionStreamChunkFinishReason,
} from './utils';

// TODO: this configuration does not enforce the maximum token limit for the input parameter. If you want to enforce this, you might need to add a custom validation function or a max property to the ParameterConfig interface, and then use it in the input configuration. However, this might be complex because the token count is not a simple length check, but depends on the specific tokenization method used by the model.

Expand Down Expand Up @@ -59,7 +64,7 @@ export const AnthropicCompleteConfig: ProviderConfig = {

interface AnthropicCompleteResponse {
completion: string;
stop_reason: string;
stop_reason: ANTHROPIC_STOP_REASON | string;
model: string;
truncated: boolean;
stop: null | string;
Expand All @@ -73,10 +78,10 @@ export const AnthropicCompleteResponseTransform: (
responseStatus: number
) => CompletionResponse | ErrorResponse = (response, responseStatus) => {
if (responseStatus !== 200) {
const errorResposne = AnthropicErrorResponseTransform(
const errorResponse = AnthropicErrorResponseTransform(
response as AnthropicErrorResponse
);
if (errorResposne) return errorResposne;
if (errorResponse) return errorResponse;
}

if ('completion' in response) {
Expand All @@ -91,7 +96,9 @@ export const AnthropicCompleteResponseTransform: (
text: response.completion,
index: 0,
logprobs: null,
finish_reason: response.stop_reason,
finish_reason: transformAnthropicCompletionFinishReason(
response.stop_reason
),
},
],
};
Expand Down Expand Up @@ -127,7 +134,9 @@ export const AnthropicCompleteStreamChunkTransform: (
text: parsedChunk.completion,
index: 0,
logprobs: null,
finish_reason: parsedChunk.stop_reason,
finish_reason: transformAnthropicCompletionStreamChunkFinishReason(
parsedChunk.stop_reason
),
},
],
})}` + '\n\n'
Expand Down
6 changes: 6 additions & 0 deletions src/providers/anthropic/types.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
/**
 * `stop_reason` values returned by the Anthropic API. The API may add
 * new values, so consumers type the field as
 * `ANTHROPIC_STOP_REASON | string`.
 */
export enum ANTHROPIC_STOP_REASON {
  max_tokens = 'max_tokens',
  stop_sequence = 'stop_sequence',
  tool_use = 'tool_use',
  end_turn = 'end_turn',
}
51 changes: 51 additions & 0 deletions src/providers/anthropic/utils.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import {
OPEN_AI_CHAT_COMPLETION_FINISH_REASON,
OPEN_AI_COMPLETION_FINISH_REASON,
} from '../types';
import { ANTHROPIC_STOP_REASON } from './types';

// this converts the anthropic stop_reason to an openai finish_reason

/**
 * Converts an Anthropic `stop_reason` to an OpenAI chat-completion
 * `finish_reason`.
 *
 * @param stopReason - Raw stop reason from the Anthropic response.
 * @returns `tool_calls` for tool use, `length` for max-token cutoff,
 *   and `stop` for everything else (including unrecognized values).
 */
export const transformAnthropicChatStopReason = (
  stopReason: ANTHROPIC_STOP_REASON | string
): OPEN_AI_CHAT_COMPLETION_FINISH_REASON => {
  if (stopReason === ANTHROPIC_STOP_REASON.tool_use) {
    return OPEN_AI_CHAT_COMPLETION_FINISH_REASON.tool_calls;
  }
  if (stopReason === ANTHROPIC_STOP_REASON.max_tokens) {
    return OPEN_AI_CHAT_COMPLETION_FINISH_REASON.length;
  }
  // stop_sequence, end_turn, and anything unknown map to 'stop'.
  return OPEN_AI_CHAT_COMPLETION_FINISH_REASON.stop;
};
// finish_reason may be null for stream chunks

/**
 * Stream-chunk variant of `transformAnthropicChatStopReason`:
 * intermediate chunks carry no stop reason, so null/undefined passes
 * through as null.
 *
 * @param stopReason - Raw stop reason from the chunk, if present.
 * @returns The mapped OpenAI finish reason, or null when absent.
 */
export const transformAnthropicChatStreamChunkStopReason = (
  stopReason?: ANTHROPIC_STOP_REASON | string | null
): OPEN_AI_CHAT_COMPLETION_FINISH_REASON | null => {
  return stopReason ? transformAnthropicChatStopReason(stopReason) : null;
};
/**
 * Converts an Anthropic `stop_reason` to an OpenAI completion
 * `finish_reason`.
 *
 * @param stopReason - Raw stop reason from the Anthropic response.
 * @returns `length` for tool use, `stop` for everything else
 *   (including unrecognized values).
 */
export const transformAnthropicCompletionFinishReason = (
  stopReason: ANTHROPIC_STOP_REASON | string
): OPEN_AI_COMPLETION_FINISH_REASON => {
  if (stopReason === ANTHROPIC_STOP_REASON.tool_use) {
    // NOTE(review): the completions vocabulary has no tool_calls
    // reason; tool_use is mapped to 'length' here — confirm this is
    // intended rather than 'stop'.
    return OPEN_AI_COMPLETION_FINISH_REASON.length;
  }
  // stop_sequence, end_turn, and anything unknown map to 'stop';
  // note max_tokens also falls through to 'stop' here (unlike the
  // chat variant) — confirm whether it should map to 'length'.
  return OPEN_AI_COMPLETION_FINISH_REASON.stop;
};

/**
 * Stream-chunk variant of `transformAnthropicCompletionFinishReason`:
 * intermediate chunks carry no stop reason, so null/undefined passes
 * through as null.
 *
 * @param stopReason - Raw stop reason from the chunk, if present.
 * @returns The mapped OpenAI finish reason, or null when absent.
 */
export const transformAnthropicCompletionStreamChunkFinishReason = (
  stopReason?: ANTHROPIC_STOP_REASON | string | null
): OPEN_AI_COMPLETION_FINISH_REASON | null => {
  return stopReason
    ? transformAnthropicCompletionFinishReason(stopReason)
    : null;
};
7 changes: 4 additions & 3 deletions src/providers/anyscale/chatComplete.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import { ANYSCALE } from '../../globals';
import {
ChatCompletionResponse,
ErrorResponse,
OPEN_AI_CHAT_COMPLETION_FINISH_REASON,
ProviderConfig,
} from '../types';
import {
Expand Down Expand Up @@ -111,7 +112,7 @@ export interface AnyscaleStreamChunk {
content?: string;
};
index: number;
finish_reason: string | null;
finish_reason: OPEN_AI_CHAT_COMPLETION_FINISH_REASON | null;
}[];
}

Expand Down Expand Up @@ -165,10 +166,10 @@ export const AnyscaleChatCompleteResponseTransform: (
responseStatus: number
) => ChatCompletionResponse | ErrorResponse = (response, responseStatus) => {
if (responseStatus !== 200) {
const errorResposne = AnyscaleErrorResponseTransform(
const errorResponse = AnyscaleErrorResponseTransform(
response as AnyscaleErrorResponse | AnyscaleValidationErrorResponse
);
if (errorResposne) return errorResposne;
if (errorResponse) return errorResponse;
}

if ('choices' in response) {
Expand Down
Loading