update chat complete function
redevrx committed Jan 24, 2024
1 parent 7df1d73 commit c8ccc12
Showing 12 changed files with 207 additions and 9 deletions.
7 changes: 6 additions & 1 deletion CHANGELOG.md
@@ -179,4 +179,9 @@
- missing first word in sse response
- Fix sse bad state close
- sse not work as intended
- custom OpenAI endpoint
- custom OpenAI endpoint

# 3.0.0
- Update OpenAI models
- Add new image generation model
- Add Assistants API
2 changes: 1 addition & 1 deletion analysis_options.yaml
@@ -17,7 +17,7 @@ dart_code_metrics:
metrics-exclude:
- test/**
rules:
- avoid-dynamic
# - avoid-dynamic
- avoid-passing-async-when-sync-expected
- avoid-redundant-async
- avoid-unnecessary-type-assertions
2 changes: 1 addition & 1 deletion example/pubspec.lock
@@ -31,7 +31,7 @@ packages:
path: ".."
relative: true
source: path
version: "2.2.8"
version: "3.0.0"
clock:
dependency: transitive
description:
6 changes: 6 additions & 0 deletions lib/src/assistants.dart
@@ -0,0 +1,6 @@
import 'client/client.dart';

class Assistants {
  final OpenAIClient _client;

  Assistants(this._client);
}
19 changes: 19 additions & 0 deletions lib/src/model/assistant/enum/assistant_model.dart
@@ -0,0 +1,19 @@
import 'package:chat_gpt_sdk/chat_gpt_sdk.dart';

sealed class AssistantModel {
  final String model;

  AssistantModel({required this.model});
}

class GptTurbo0301Model extends AssistantModel {
  GptTurbo0301Model() : super(model: kChatGptTurbo0301Model);
}

class GptTurbo1106Model extends AssistantModel {
  GptTurbo1106Model() : super(model: kChatGptTurbo1106);
}

class Gpt41106PreviewModel extends AssistantModel {
  Gpt41106PreviewModel() : super(model: kGpt41106Preview);
}
69 changes: 69 additions & 0 deletions lib/src/model/assistant/request/assistant.dart
@@ -0,0 +1,69 @@
import 'package:chat_gpt_sdk/src/model/assistant/enum/assistant_model.dart';

class Assistant {
  ///ID of the model to use. You can use the List models
  /// API to see all of your available models, or see our Model
  /// overview for descriptions of them.
  /// [model]
  final AssistantModel model;

  ///The name of the assistant. The maximum length is 256 characters.
  ///[name]
  final String? name;

  ///The description of the assistant. The maximum length is 512 characters.
  ///[description]
  final String? description;

  ///The system instructions that the assistant uses.
  /// The maximum length is 32768 characters.
  /// [instructions]
  final String? instructions;

  ///A list of tools enabled on the assistant.
  /// There can be a maximum of 128 tools per assistant.
  /// Tools can be of types code_interpreter, retrieval, or function.
  /// https://platform.openai.com/docs/api-reference/assistants/createAssistant
  /// ### Code Interpreter
  /// "tools": [
  ///   { "type": "code_interpreter" }
  /// ]
  /// ### Enabling Retrieval
  /// "tools": [{"type": "retrieval"}]
  /// [tools]
  final List? tools;

  ///A list of file IDs attached to this assistant.
  /// There can be a maximum of 20 files attached to the assistant.
  /// Files are ordered by their creation date in ascending order.
  /// [fileIds]
  final List? fileIds;

  ///Set of 16 key-value pairs that can be attached to an object.
  /// This can be useful for storing additional information
  /// about the object in a structured format.
  /// Keys can be a maximum of 64 characters
  /// long and values can be a maximum of 512 characters long.
  /// [metadata]
  final Map? metadata;

  Assistant({
    required this.model,
    this.name,
    this.description,
    this.instructions,
    this.tools,
    this.fileIds,
    this.metadata,
  });

  Map<String, dynamic> toJson() => Map.of({
        "model": model.model,
        'name': name,
        'description': description,
        'instructions': instructions,
        'tools': tools ?? [],
        'file_ids': fileIds ?? [],
        'metadata': metadata ?? {},
      });
}
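The new `Assistant` class pairs a sealed `AssistantModel` with the optional create-assistant fields and flattens them in `toJson()`. A minimal usage sketch — it assumes these classes end up exported from `package:chat_gpt_sdk/chat_gpt_sdk.dart`, which this commit does not show:

```dart
import 'package:chat_gpt_sdk/chat_gpt_sdk.dart';

void main() {
  // Build a create-assistant payload with one of the model classes
  // added in lib/src/model/assistant/enum/assistant_model.dart.
  final assistant = Assistant(
    model: Gpt41106PreviewModel(),
    name: 'Math Tutor',
    instructions: 'You are a personal math tutor. Keep answers short.',
    tools: [
      {"type": "code_interpreter"},
    ],
  );

  // toJson() falls back to empty collections for omitted tools,
  // file_ids and metadata; other omitted fields stay null.
  print(assistant.toJson());
  // {model: gpt-4-1106-preview, name: Math Tutor, description: null, ...}
}
```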
4 changes: 4 additions & 0 deletions lib/src/model/chat_complete/enum/chat_model.dart
@@ -49,6 +49,10 @@ class Gpt40631ChatModel extends ChatModel {
Gpt40631ChatModel() : super(model: kChatGpt40631);
}

class Gpt4VisionPreviewChatModel extends ChatModel {
Gpt4VisionPreviewChatModel() : super(model: kGpt4VisionPreview);
}

// enum ChatModel {
// gptTurbo,
//
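For reference, the added `Gpt4VisionPreviewChatModel` simply wraps the `kGpt4VisionPreview` constant introduced further down in `lib/src/utils/constants.dart`, so it can be passed anywhere a `ChatModel` is accepted — a sketch, assuming the class is exported like the other chat models:

```dart
import 'package:chat_gpt_sdk/chat_gpt_sdk.dart';

void main() {
  // The new class only resolves to the added model-name constant.
  final visionModel = Gpt4VisionPreviewChatModel();
  print(visionModel.model); // gpt-4-vision-preview
}
```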
91 changes: 88 additions & 3 deletions lib/src/model/chat_complete/request/chat_complete_text.dart
@@ -27,6 +27,7 @@ class ChatCompleteText {

///A list of functions the model may generate JSON inputs for.
///[functions]
@Deprecated('Use tools instead.')
final List<FunctionData>? functions;

///Controls how the model responds to function calls.
@@ -37,6 +38,7 @@
/// call that function. "none" is the default when no functions
/// are present. "auto" is the default if functions are present.
/// Specifying {"name": "my_function"} forces that function.
/// [functionCall]
@Deprecated('Use toolChoice instead.')
final FunctionCall? functionCall;

///Defines the format of the model's response output. Currently, only supports
@@ -57,6 +59,50 @@
///We generally recommend altering this or temperature but not both. [topP]
final double? topP;

///A list of tools the model may call. Currently,
/// only functions are supported as a tool.
/// Use this to provide a list of functions
/// the model may generate JSON inputs for.
/// [tools]
/**
 * ## Example
 * ```dart
 * final tools = [
 *   {
 *     "type": "function",
 *     "function": {
 *       "name": "get_current_weather",
 *       "description": "Get the current weather in a given location",
 *       "parameters": {
 *         "type": "object",
 *         "properties": {
 *           "location": {
 *             "type": "string",
 *             "description": "The city and state, e.g. San Francisco, CA"
 *           },
 *           "unit": {
 *             "type": "string",
 *             "enum": ["celsius", "fahrenheit"]
 *           }
 *         },
 *         "required": ["location"]
 *       }
 *     }
 *   }
 * ]
 * ```
 */
final List<Map<String, dynamic>>? tools;

///[toolChoice]
/// ### Type String or Map
///Controls which (if any) function is called by the model.
/// none means the model will not call a function and instead
/// generates a message. auto means the model can pick between
/// generating a message or calling a function.
/// Specifying a particular function via
/// {"type": "function", "function": {"name": "my_function"}}
/// forces the model to call that function. none is the default when no
/// functions are present. auto is the default if functions are present.
final dynamic toolChoice;

///How many chat completion choices to generate for each input message. [n]
final int? n;

@@ -97,7 +143,30 @@ class ChatCompleteText {
/// values between -1 and 1 should decrease or increase likelihood of selection;
/// values like -100 or 100 should result in a ban or exclusive selection of the
/// relevant token. [logitBias]
//final Map<String, dynamic>? logitBias;
final Map<String, dynamic>? logitBias;

///Whether to return log probabilities of the output tokens or not.
/// If true, returns the log probabilities of each output token returned
/// in the content of message. This option is currently not available on
/// the gpt-4-vision-preview model.
/// [logprobs]
final bool logprobs;

///An integer between 0 and 5 specifying the number of most
/// likely tokens to return at each token position,
/// each with an associated log probability.
/// logprobs must be set to true if this parameter is used.
/// [topLogprobs]
final int? topLogprobs;

///This feature is in Beta.
/// If specified, our system will make a best effort to sample
/// deterministically, such that repeated requests with the same seed
/// and parameters should return the same result. Determinism is not
/// guaranteed, and you should refer to the system_fingerprint response
/// parameter to monitor changes in the backend.
/// [seed]
final int? seed;

///A unique identifier representing your end-user, which can help OpenAI
///to monitor and detect abuse.[user]
@@ -118,6 +187,12 @@
this.functions,
this.functionCall,
this.responseFormat,
this.logprobs = false,
this.logitBias,
this.topLogprobs,
this.seed,
this.tools,
this.toolChoice,
});

Map<String, dynamic> toJson() {
@@ -126,8 +201,8 @@
? Map.of({
"model": model.model,
"messages": messages.map((e) => e.toJsonFunctionStruct()).toList(),
"functions": functions?.map((e) => e.toJson()).toList(),
"function_call": functionCall?.name,
// "functions": functions?.map((e) => e.toJson()).toList(),
// "function_call": functionCall?.name,
"temperature": temperature,
"top_p": topP,
"n": n,
@@ -138,6 +213,11 @@
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
"logit_bias": logitBias,
"logprobs": logprobs,
"top_logprobs": topLogprobs,
"seed": seed,
"tool_choice": toolChoice,
})
: Map.of({
"model": model.model,
@@ -152,6 +232,11 @@
"frequency_penalty": frequencyPenalty,
"user": user,
"response_format": responseFormat?.toJson(),
"logit_bias": logitBias,
"logprobs": logprobs,
"top_logprobs": topLogprobs,
"seed": seed,
"tool_choice": toolChoice,
})
..removeWhere((key, value) => value == null);

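Taken together, the new `ChatCompleteText` fields let a request opt into tool calling (`tools`/`toolChoice`, replacing the deprecated `functions`/`functionCall`) and the newer sampling controls (`seed`, `logprobs`, `topLogprobs`, `logitBias`). A hedged sketch of a caller exercising them — `Messages`, `Role` and `maxToken` are assumed from the SDK's existing chat API, and `Gpt40631ChatModel` is one of the chat models visible above:

```dart
import 'package:chat_gpt_sdk/chat_gpt_sdk.dart';

void main() {
  final request = ChatCompleteText(
    model: Gpt40631ChatModel(),
    maxToken: 200,
    // Messages/Role are the SDK's existing chat helpers (assumed here).
    messages: [
      Messages(role: Role.user, content: "What's the weather in Boston?"),
    ],
    // New in this commit: tool calling.
    tools: [
      {
        "type": "function",
        "function": {
          "name": "get_current_weather",
          "description": "Get the current weather in a given location",
          "parameters": {
            "type": "object",
            "properties": {
              "location": {"type": "string"},
              "unit": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"]
              }
            },
            "required": ["location"]
          }
        }
      }
    ],
    toolChoice: 'auto',
    // Also new: best-effort deterministic sampling and log probabilities.
    seed: 42,
    logprobs: true,
    topLogprobs: 2,
  );

  // toJson() builds the request body and strips null-valued keys.
  print(request.toJson());
}
```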
4 changes: 4 additions & 0 deletions lib/src/openai.dart
@@ -1,5 +1,6 @@
import 'dart:async';
import 'dart:io';
import 'package:chat_gpt_sdk/src/assistants.dart';
import 'package:chat_gpt_sdk/src/audio.dart';
import 'package:chat_gpt_sdk/src/client/client.dart';
import 'package:chat_gpt_sdk/src/client/exception/missing_token_exception.dart';
@@ -228,4 +229,7 @@ class OpenAI implements IOpenAI {

///moderation's
Moderation get moderation => Moderation(_client);

///Assistants
Assistants get assistant => Assistants(_client);
}
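With the getter in place, the new Assistants handle is reachable from the existing entry point — a sketch, assuming the SDK's usual `OpenAI.instance.build` setup:

```dart
import 'package:chat_gpt_sdk/chat_gpt_sdk.dart';

void main() {
  final openAI = OpenAI.instance.build(token: 'YOUR_API_KEY');

  // New in this commit: an Assistants handle sharing the same OpenAIClient.
  // It exposes no endpoint methods yet.
  final assistants = openAI.assistant;
  print(assistants.runtimeType); // Assistants
}
```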
5 changes: 5 additions & 0 deletions lib/src/utils/constants.dart
@@ -44,6 +44,9 @@ const kFineTuneModel = 'models';
///moderation's
const kModeration = 'moderations';

///assistants
const kAssistants = 'assistants';

///model name
const kGpt3TurboInstruct = 'gpt-3.5-turbo-instruct';

@@ -58,6 +61,8 @@ const kChatGptTurbo0613 = 'gpt-3.5-turbo-0613';
const kChatGptTurbo1106 = 'gpt-3.5-turbo-1106';
const kChatGptTurbo16k0613 = 'gpt-3.5-turbo-16k-0613';
const kChatGpt40631 = 'gpt-4-0613';
const kGpt41106Preview = 'gpt-4-1106-preview';
const kGpt4VisionPreview = 'gpt-4-vision-preview';

///edits
// using gpt 4
2 changes: 1 addition & 1 deletion pubspec.yaml
@@ -1,6 +1,6 @@
name: chat_gpt_sdk
description: Create chat bots and other bots with the ChatGPT SDK. Supports GPT-4 and GPT-3.5, plus SSE prompt generation (stream).
version: 2.2.8
version: 3.0.0
homepage: https://www.facebook.com/REDEVRX
repository: https://github.com/redevRx/Flutter-ChatGPT

5 changes: 3 additions & 2 deletions test/model/fine_tune/request/create_fine_tune_test.dart
@@ -6,8 +6,9 @@ void main() {
group('openai create fine tune test', () {
test('openai create fine tune test to json', () {
final fineTune = CreateFineTune(
trainingFile: 'trainingFile', model: Babbage002FineModel())
.toJson();
trainingFile: 'trainingFile',
model: Babbage002FineModel(),
).toJson();

expect(fineTune['training_file'], 'trainingFile');
expect(fineTune['n_epochs'], 4);
