-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy path: completions.ts
105 lines (80 loc) · 2.87 KB
/
completions.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
import * as Core from '../../core';
import * as ChatAPI from './chat';
export class Completions extends APIResource {
  /**
   * Talk to different LLM Chat APIs via OpenAI's API.
   *
   * Issues a POST to the OpenAI-compatible chat-completions endpoint with the
   * given request body and resolves with the provider's chat response.
   *
   * @param body - Chat-completion request parameters.
   * @param options - Optional per-request overrides merged into the request.
   * @returns A promise-like wrapper around the chat response.
   */
  create(body: CompletionCreateParams, options?: Core.RequestOptions): Core.APIPromise<ChatAPI.ChatResponse> {
    const request = { body, ...options };
    return this._client.post('/v1/chat/completions', request);
  }
}
/**
 * Request body for `Completions.create` (POST `/v1/chat/completions`).
 * Mirrors the OpenAI chat-completion request shape; every field is optional
 * and unset fields fall through to the upstream provider's defaults.
 */
export interface CompletionCreateParams {
frequency_penalty?: number;
/**
 * LogitBias must be keyed by token id string (the token's ID in the
 * tokenizer), not a word string. Incorrect: `"logit_bias":{"You": 6}`; correct:
 * `"logit_bias":{"1639": 6}`. Refs:
 * https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
 */
logit_bias?: Record<string, number>;
/**
 * LogProbs indicates whether to return log probabilities of the output tokens or
 * not. If true, returns the log probabilities of each output token returned in the
 * content of message. This option is currently not available on the
 * gpt-4-vision-preview model.
 */
logprobs?: boolean;
max_tokens?: number;
messages?: Array<ChatAPI.ChatMessage>;
// Model identifier; presumably provider-specific — see the upstream API docs.
model?: string;
// Number of completions to generate per request.
n?: number;
/**
 * Disable the default behavior of parallel tool calls by setting it: false.
 */
parallel_tool_calls?: boolean;
presence_penalty?: number;
response_format?: CompletionCreateParams.ResponseFormat;
seed?: number;
stop?: Array<string>;
// When true, the response is streamed; see also `stream_options`.
stream?: boolean;
/**
 * Options for streaming response. Only set this when you set stream: true.
 */
stream_options?: CompletionCreateParams.StreamOptions;
temperature?: number;
/**
 * This can be either a string or a ToolChoice object.
 */
tool_choice?: unknown;
// Typed as `unknown` by the generator; expected shape is tool definitions —
// verify against the upstream API reference.
tools?: unknown;
/**
 * TopLogProbs is an integer between 0 and 5 specifying the number of most likely
 * tokens to return at each token position, each with an associated log
 * probability. logprobs must be set to true if this parameter is used.
 */
top_logprobs?: number;
top_p?: number;
user?: string;
}
/**
 * Nested parameter types referenced from `CompletionCreateParams`
 * (declaration-merged with the interface of the same name).
 */
export namespace CompletionCreateParams {
// Constrains the response body format; `json_object` requests JSON-mode output.
export interface ResponseFormat {
type?: 'json_object' | 'text';
}
/**
 * Options for streaming response. Only set this when you set stream: true.
 */
export interface StreamOptions {
/**
 * If set, an additional chunk will be streamed before the data: [DONE] message.
 * The usage field on this chunk shows the token usage statistics for the entire
 * request, and the choices field will always be an empty array. All other chunks
 * will also include a usage field, but with a null value.
 */
include_usage?: boolean;
}
}
/**
 * Re-exports the request param types under the `Completions` resource
 * namespace, so callers can write `Completions.CompletionCreateParams`.
 */
export declare namespace Completions {
  export { type CompletionCreateParams };
}