Skip to content

Commit 6433ab8

Browse files
committed
feat: types update
1 parent 358811c commit 6433ab8

File tree

1 file changed

+83
-16
lines changed

1 file changed

+83
-16
lines changed

index.d.ts

Lines changed: 83 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -3,18 +3,66 @@ import { RequestInit } from 'node-fetch';
33
/** Message in {@link https://platform.openai.com/docs/guides/chat/introduction chat format} */
44
export type Message = {
55
/**
6-
* The system message helps set the behavior of the assistant. In the example above,
7-
* the assistant was instructed with “You are a helpful assistant.”
6+
* _Required_
7+
*
8+
* The role of the messages author.
9+
* {@link https://platform.openai.com/docs/guides/gpt/chat-completions-api Read more}
10+
*/
11+
role: 'system' | 'user' | 'assistant' | 'function';
12+
/**
13+
* _Optional_
14+
*
15+
* The contents of the message. `content` is required for all messages
16+
* except assistant messages with function calls.
17+
*/
18+
content?: string;
19+
/**
20+
* _Optional_
21+
*
22+
* The name of the author of this message.
823
*
9-
* The user messages help instruct the assistant. They can be generated
10-
* by the end users of an application, or set by a developer as an instruction.
24+
* `name` is required if role is `function`, and it should be the name of the function
25+
* whose response is in the `content`.
1126
*
12-
* The assistant messages help store prior responses.
13-
* They can also be written by a developer to help give examples of desired behavior.
27+
* May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
1428
*/
15-
role: 'system' | 'user' | 'assistant';
16-
/** The content of the message */
17-
content: string;
29+
name?: string;
30+
/**
31+
* _Optional_
32+
*
33+
* The name and arguments of a function that should be called, as generated by the model.
34+
*/
35+
function_call?: object;
36+
};
37+
/**
38+
* In an API call, you can describe functions to gpt-3.5-turbo-0613 and gpt-4-0613,
39+
* and have the model intelligently choose to output a JSON object containing
40+
* arguments to call those functions. The Chat Completions API does not call the function;
41+
* instead, the model generates JSON that you can use to call the function in your code.
42+
*
43+
* {@link https://platform.openai.com/docs/guides/gpt/function-calling Read more}
44+
*/
45+
export type FunctionModel = {
46+
/**
47+
* _Required_
48+
*
49+
* The name of the function to be called.
50+
* Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
51+
*/
52+
name: string;
53+
/**
54+
* _Optional_
55+
*
56+
* The description of what the function does.
57+
*/
58+
description?: string;
59+
/**
60+
* _Optional_
61+
*
62+
* The parameters the functions accepts, described as a JSON Schema object.
63+
* See the {@link https://platform.openai.com/docs/guides/gpt/function-calling guide} for examples, and the {@link https://json-schema.org/understanding-json-schema/ JSON Schema reference} for documentation about the format.
64+
*/
65+
parameters?: object;
1866
};
1967
/** Request body */
2068
export type ReqBody = {
@@ -23,13 +71,30 @@ export type ReqBody = {
2371
*
2472
* ID of the model to use. See the {@link https://platform.openai.com/docs/models/model-endpoint-compatibility model endpoint compatibility} table for details on which models work with the Chat API.
2573
*/
26-
model: 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-32k' | 'gpt-4-32k-0314';
74+
model: 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0613';
2775
/**
2876
* _Required_
2977
*
3078
* The messages to generate chat completions for, in the {@link https://platform.openai.com/docs/guides/chat/introduction chat format}.
3179
*/
3280
messages: Array<Message>;
81+
/**
82+
* _Optional_
83+
*
84+
* A list of functions the model may generate JSON inputs for.
85+
*/
86+
functions?: Array<FunctionModel>;
87+
/**
88+
* _Optional_
89+
*
90+
* Controls how the model responds to function calls. "none" means the model does not
91+
* call a function, and responds to the end-user.
92+
* "auto" means the model can pick between an end-user or calling a function.
93+
* Specifying a particular function via `{"name": "my_function"}`
94+
* forces the model to call that function. "none" is the default when no functions are present.
95+
* "auto" is the default if functions are present.
96+
*/
97+
function_call?: string | object;
3398
/**
3499
* _Optional. Defaults to 1_
35100
*
@@ -46,7 +111,7 @@ export type ReqBody = {
46111
* considers the results of the tokens with top_p probability mass. So 0.1 means only the
47112
* tokens comprising the top 10% probability mass are considered.
48113
*
49-
* We generally recommend altering this or temperature but not both.
114+
* We generally recommend altering this or `temperature` but not both.
50115
*/
51116
top_p?: number;
52117
/**
@@ -70,8 +135,10 @@ export type ReqBody = {
70135
/**
71136
* _Optional. Defaults to inf_
72137
*
73-
* The maximum number of tokens allowed for the generated answer.
74-
* By default, the number of tokens the model can return will be (4096 - prompt tokens).
138+
* The maximum number of tokens to generate in the chat completion.
139+
* The total length of input tokens and generated tokens is limited by the model's context length.
140+
*
141+
* {@link https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb Example Python code} for counting tokens.
75142
*/
76143
max_tokens?: number;
77144
/**
@@ -205,7 +272,7 @@ export declare class ChatGPT {
205272
ORG: string | undefined;
206273
URL: string;
207274
MODEL: ReqBody['model'];
208-
constructor({ API_KEY, ORG, MODEL, }: {
275+
constructor({ API_KEY, ORG, URL, MODEL, }: {
209276
/**
210277
* The OpenAI API uses API keys for authentication.
211278
* Visit your {@link https://platform.openai.com/account/api-keys API Keys} page to retrieve the API key you'll use in your requests.
@@ -234,7 +301,7 @@ export declare class ChatGPT {
234301
/**
235302
* ## .send(ReqBody | string, [RequestInit])
236303
*
237-
* Use this method to send request to ChatGPT API
304+
* Use this method to send a request to ChatGPT API
238305
*
239306
* `RequestInit` is {@link https://www.npmjs.com/package/node-fetch#options node-fetch options}.
240307
*
@@ -256,7 +323,7 @@ export declare class ChatGPT {
256323
/**
257324
* ## .stream(ReqBody | string, [RequestInit])
258325
*
259-
* Use this method to send request to ChatGPT API and get steam response back
326+
* Use this method to send a request to ChatGPT API and get a stream response back
260327
*
261328
* `RequestInit` is {@link https://www.npmjs.com/package/node-fetch#options node-fetch options}.
262329
*

0 commit comments

Comments
 (0)