
Commit 7011bc7

committed
exposed more enums for chat, audio, and image for a better developer experience, and refactored and added more docs
1 parent 9ca9db0 commit 7011bc7


12 files changed: +60 -48 lines changed


README.md

Lines changed: 8 additions & 5 deletions
@@ -219,7 +219,10 @@ Creates a completion for the chat message, note you need to set each message as
 OpenAIChatCompletionModel chatCompletion = await OpenAI.instance.chat.create(
   model: "gpt-3.5-turbo",
   messages: [
-    OpenAIChatCompletionChoiceMessageModel(content: "hello, what is Flutter and Dart ?", role: "user"),
+    OpenAIChatCompletionChoiceMessageModel(
+      content: "hello, what is Flutter and Dart ?",
+      role: OpenAIChatMessageRole.user,
+    ),
   ],
 );
 ```

@@ -234,7 +237,7 @@ OpenAIStreamChatCompletionModel chatStream = OpenAI.instance.chat.createStream(
   messages: [
     OpenAIChatCompletionChoiceMessageModel(
       content: "hello",
-      role: "user",
+      role: OpenAIChatMessageRole.user,
     )
   ],
 );

@@ -277,7 +280,7 @@ Generates a new image based on a prompt given.
   prompt: 'an astronaut on the sea',
   n: 1,
   size: OpenAIImageSize.size1024,
-  responseFormat: OpenAIResponseFormat.url,
+  responseFormat: OpenAIImageResponseFormat.url,
 );
 ```

@@ -292,7 +295,7 @@ OpenAiImageEditModel imageEdit = await OpenAI.instance.image.edit(
   prompt: "mask the image with a dinosaur",
   n: 1,
   size: OpenAIImageSize.size1024,
-  responseFormat: OpenAIResponseFormat.url,
+  responseFormat: OpenAIImageResponseFormat.url,
 );
 ```

@@ -305,7 +308,7 @@ OpenAIImageVariationModel imageVariation = await OpenAI.instance.image.variation
   image: File(/* IMAGE PATH HERE */),
   n: 1,
   size: OpenAIImageSize.size1024,
-  responseFormat: OpenAIResponseFormat.url,
+  responseFormat: OpenAIImageResponseFormat.url,
 );
 ```
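Taken together, these README changes swap raw role strings for the new `OpenAIChatMessageRole` enum and rename the image response-format enum to `OpenAIImageResponseFormat`. A minimal sketch of the updated call sites, assuming the package export shown elsewhere in this commit (`package:dart_openai/openai.dart`) and an inline API-key placeholder:

```dart
import 'package:dart_openai/openai.dart';

Future<void> main() async {
  // Assumption: the key is set inline here instead of from a .env file.
  OpenAI.apiKey = "<YOUR_API_KEY>";

  // Chat completion: the role is now the OpenAIChatMessageRole enum,
  // not the raw string "user".
  final chat = await OpenAI.instance.chat.create(
    model: "gpt-3.5-turbo",
    messages: [
      OpenAIChatCompletionChoiceMessageModel(
        content: "hello, what is Flutter and Dart ?",
        role: OpenAIChatMessageRole.user,
      ),
    ],
  );
  print(chat.choices.first.message.content);

  // Image generation: the response-format enum is now OpenAIImageResponseFormat.
  final image = await OpenAI.instance.image.create(
    prompt: 'an astronaut on the sea',
    n: 1,
    size: OpenAIImageSize.size1024,
    responseFormat: OpenAIImageResponseFormat.url,
  );
  print(image);
}
```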
example/lib/chat_campletion_stream_example.dart

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@ void main() {
     messages: [
       OpenAIChatCompletionChoiceMessageModel(
         content: "hello",
-        role: "user",
+        role: OpenAIChatMessageRole.user,
       )
     ],
   );

example/lib/chat_completion_example.dart

Lines changed: 7 additions & 5 deletions
@@ -6,11 +6,13 @@ void main() async {
   // Set the OpenAI API key from the .env file.
   OpenAI.apiKey = Env.apiKey;

-  OpenAIChatCompletionModel chatCompletion = await OpenAI.instance.chat.create(
-      model: "gpt-3.5-turbo",
-      messages: [
-        OpenAIChatCompletionChoiceMessageModel(content: "hello", role: "user")
-      ]);
+  OpenAIChatCompletionModel chatCompletion =
+      await OpenAI.instance.chat.create(model: "gpt-3.5-turbo", messages: [
+    OpenAIChatCompletionChoiceMessageModel(
+      content: "hello",
+      role: OpenAIChatMessageRole.user,
+    )
+  ]);

   print(chatCompletion.id);
   print(chatCompletion.choices.first.message);
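Since the new `OpenAIChatMessageRole` enum (declared in `lib/src/core/models/image/enum.dart` further down) also provides `system` and `assistant`, a conversation with a steering system message can be sketched the same way; this variant is illustrative and not part of the commit:

```dart
import 'package:dart_openai/openai.dart';

Future<void> main() async {
  // Assumption: key set inline rather than via Env.apiKey.
  OpenAI.apiKey = "<YOUR_API_KEY>";

  OpenAIChatCompletionModel chatCompletion =
      await OpenAI.instance.chat.create(model: "gpt-3.5-turbo", messages: [
    // System message steering the assistant, using the new enum value.
    OpenAIChatCompletionChoiceMessageModel(
      content: "You are a concise assistant.",
      role: OpenAIChatMessageRole.system,
    ),
    OpenAIChatCompletionChoiceMessageModel(
      content: "hello",
      role: OpenAIChatMessageRole.user,
    ),
  ]);

  print(chatCompletion.id);
  print(chatCompletion.choices.first.message);
}
```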

example/lib/image_variation_example.dart

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ Future<void> main() async {
     image: File("example.png"),
     n: 1,
     size: OpenAIImageSize.size256,
-    responseFormat: OpenAIResponseFormat.b64Json,
+    responseFormat: OpenAIImageResponseFormat.b64Json,
   );

   // Prints the result.

lib/src/core/base/images/base.dart

Lines changed: 3 additions & 3 deletions
@@ -25,12 +25,12 @@ extension SizeToStingExtension on OpenAIImageSize {
   }
 }

-extension ResponseFormatToStingExtension on OpenAIResponseFormat {
+extension ResponseFormatToStingExtension on OpenAIImageResponseFormat {
   String get value {
     switch (this) {
-      case OpenAIResponseFormat.url:
+      case OpenAIImageResponseFormat.url:
         return "url";
-      case OpenAIResponseFormat.b64Json:
+      case OpenAIImageResponseFormat.b64Json:
         return "b64_json";
     }
   }
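The extension keeps the same wire-format mapping under the renamed enum; a quick illustrative check of what the `value` getter resolves to, assuming the extension is in scope (it lives under `lib/src` and is used internally when building request bodies):

```dart
// Illustrative only: assumes ResponseFormatToStingExtension is importable here.
void main() {
  print(OpenAIImageResponseFormat.url.value); // url
  print(OpenAIImageResponseFormat.b64Json.value); // b64_json
}
```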

lib/src/core/base/images/interfaces/create.dart

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@ abstract class CreateInterface {
     required String prompt,
     int? n,
     OpenAIImageSize? size,
-    OpenAIResponseFormat? responseFormat,
+    OpenAIImageResponseFormat? responseFormat,
     String? user,
   });
 }

lib/src/core/base/images/interfaces/edit.dart

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ abstract class EditInterface {
     required String prompt,
     int? n,
     OpenAIImageSize? size,
-    OpenAIResponseFormat? responseFormat,
+    OpenAIImageResponseFormat? responseFormat,
     String? user,
   });
 }

lib/src/core/base/images/interfaces/variations.dart

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ abstract class VariationInterface {
     required File image,
     int? n,
     OpenAIImageSize? size,
-    OpenAIResponseFormat? responseFormat,
+    OpenAIImageResponseFormat? responseFormat,
     String? user,
   });
 }

lib/src/core/models/chat/sub_models/choices/sub_models/message.dart

Lines changed: 6 additions & 3 deletions
@@ -1,9 +1,11 @@
+import '../../../../image/enum.dart';
+
 /// {@template openai_chat_completion_choice_message_model}
 /// This represents the message of the [OpenAIChatCompletionChoiceModel] model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods.
 /// {@endtemplate}
 class OpenAIChatCompletionChoiceMessageModel {
   /// The [role] of the message.
-  final String role;
+  final OpenAIChatMessageRole role;

   /// The [content] of the message.
   final String content;

@@ -24,15 +26,16 @@ class OpenAIChatCompletionChoiceMessageModel {
     Map<String, dynamic> json,
   ) {
     return OpenAIChatCompletionChoiceMessageModel(
-      role: json['role'],
+      role: OpenAIChatMessageRole.values
+          .firstWhere((role) => role.name == json['role']),
       content: json['content'],
     );
   }

   /// This method used to convert the [OpenAIChatCompletionChoiceMessageModel] to a [Map<String, dynamic>] object.
   Map<String, dynamic> toMap() {
     return {
-      "role": role,
+      "role": role.name,
       "content": content,
     };
   }
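With the role now typed as an enum, serialization writes `role.name` and parsing matches on the enum's `name`. A small round-trip sketch, assuming the factory shown above is named `fromMap` and both methods are reachable through the public export:

```dart
import 'package:dart_openai/openai.dart';

void main() {
  final message = OpenAIChatCompletionChoiceMessageModel(
    content: "hello",
    role: OpenAIChatMessageRole.user,
  );

  // toMap() writes the enum's name, so the wire format is still the string "user".
  final map = message.toMap();
  print(map); // {role: user, content: hello}

  // The factory looks the enum back up by name.
  final parsed = OpenAIChatCompletionChoiceMessageModel.fromMap(map);
  print(parsed.role == OpenAIChatMessageRole.user); // true
}
```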

lib/src/core/models/image/enum.dart

Lines changed: 3 additions & 1 deletion
@@ -1,5 +1,7 @@
 enum OpenAIImageSize { size256, size512, size1024 }

-enum OpenAIResponseFormat { url, b64Json }
+enum OpenAIImageResponseFormat { url, b64Json }

 enum OpenAIAudioResponseFormat { json, text, srt, verbose_json, vtt }
+
+enum OpenAIChatMessageRole { system, user, assistant }
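Note that the `fromMap` lookup above uses `firstWhere` on `name`, which throws a `StateError` if the API ever returns a role outside `system`, `user`, or `assistant`. A hypothetical defensive helper (not part of this commit) could add a fallback:

```dart
import 'package:dart_openai/openai.dart';

// Hypothetical helper, not in the package: resolves a role by its wire name
// and falls back to `user` instead of throwing on an unknown value.
OpenAIChatMessageRole roleFromString(String raw) {
  return OpenAIChatMessageRole.values.firstWhere(
    (role) => role.name == raw,
    orElse: () => OpenAIChatMessageRole.user,
  );
}

void main() {
  print(roleFromString("assistant")); // OpenAIChatMessageRole.assistant
  print(roleFromString("tool")); // falls back to OpenAIChatMessageRole.user
}
```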

lib/src/instance/images/images.dart

Lines changed: 12 additions & 12 deletions
@@ -42,8 +42,8 @@ class OpenAIImages implements OpenAIImagesBase {
   ///
   ///
   /// [responseFormat] is the format in which the generated images are returned. Must be one of :
-  /// - `OpenAIResponseFormat.url`
-  /// - `OpenAIResponseFormat.b64Json`
+  /// - `OpenAIImageResponseFormat.url`
+  /// - `OpenAIImageResponseFormat.b64Json`
   ///
   ///
   /// [user] is the user ID to associate with the request. This is used to prevent abuse of the API.

@@ -55,15 +55,15 @@ class OpenAIImages implements OpenAIImagesBase {
   /// prompt: 'create an image about the sea',
   /// n: 1,
   /// size: OpenAIImageSize.size1024,
-  /// responseFormat: OpenAIResponseFormat.url,
+  /// responseFormat: OpenAIImageResponseFormat.url,
   /// );
   ///```
   @override
   Future<OpenAIImageModel> create({
     required String prompt,
     int? n,
     OpenAIImageSize? size,
-    OpenAIResponseFormat? responseFormat,
+    OpenAIImageResponseFormat? responseFormat,
     String? user,
   }) async {
     final String generations = "/generations";

@@ -100,8 +100,8 @@ class OpenAIImages implements OpenAIImagesBase {
   ///
   ///
   /// [responseFormat] is the format in which the generated images are returned. Must be one of :
-  /// - `OpenAIResponseFormat.url`
-  /// - `OpenAIResponseFormat.b64Json`
+  /// - `OpenAIImageResponseFormat.url`
+  /// - `OpenAIImageResponseFormat.b64Json`
   ///
   ///
   ///

@@ -116,7 +116,7 @@ class OpenAIImages implements OpenAIImagesBase {
   /// prompt: "mask the image with a dinosaur in the image",
   /// n: 1,
   /// size: OpenAIImageSize.size1024,
-  /// responseFormat: OpenAIResponseFormat.url,
+  /// responseFormat: OpenAIImageResponseFormat.url,
   /// );
   ///```
   @override

@@ -126,7 +126,7 @@ class OpenAIImages implements OpenAIImagesBase {
     required String prompt,
     int? n,
     OpenAIImageSize? size,
-    OpenAIResponseFormat? responseFormat,
+    OpenAIImageResponseFormat? responseFormat,
     String? user,
   }) async {
     final String edit = "/edits";

@@ -163,8 +163,8 @@ class OpenAIImages implements OpenAIImagesBase {
   ///
   ///
   /// [responseFormat] is the format in which the generated images are returned. Must be one of :
-  /// - `OpenAIResponseFormat.url`
-  /// - `OpenAIResponseFormat.b64Json`
+  /// - `OpenAIImageResponseFormat.url`
+  /// - `OpenAIImageResponseFormat.b64Json`
   ///
   ///
   /// [user] is the user ID to associate with the request. This is used to prevent abuse of the API.

@@ -176,15 +176,15 @@ class OpenAIImages implements OpenAIImagesBase {
   /// image: File(/* IMAGE PATH HERE */),
   /// n: 1,
   /// size: OpenAIImageSize.size1024,
-  /// responseFormat: OpenAIResponseFormat.url,
+  /// responseFormat: OpenAIImageResponseFormat.url,
   /// );
   /// ```
   @override
   Future<OpenAIImageVariationModel> variation({
     required File image,
     int? n,
     OpenAIImageSize? size,
-    OpenAIResponseFormat? responseFormat,
+    OpenAIImageResponseFormat? responseFormat,
     String? user,
   }) async {
     final String variations = "/variations";
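The doc comments above list the two accepted formats; here is a minimal sketch of a call that requests base64 output instead of a URL, using only the parameters documented in this file (the API-key placeholder is an assumption):

```dart
import 'package:dart_openai/openai.dart';

Future<void> main() async {
  OpenAI.apiKey = "<YOUR_API_KEY>";

  // Same documented signature, but asking for base64-encoded image data.
  final OpenAIImageModel image = await OpenAI.instance.image.create(
    prompt: 'create an image about the sea',
    n: 1,
    size: OpenAIImageSize.size256,
    responseFormat: OpenAIImageResponseFormat.b64Json,
  );

  print(image);
}
```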

test/openai_test.dart

Lines changed: 16 additions & 14 deletions
@@ -5,6 +5,7 @@ import 'package:dart_openai/openai.dart';
 import 'package:http/http.dart' as http;
 import 'package:test/test.dart';

+@Timeout(Duration(minutes: 2))
 void main() async {
   final exampleImageFile = await getFileFromUrl(
     "https://upload.wikimedia.org/wikipedia/commons/7/7e/Dart-logo.png",

@@ -34,7 +35,7 @@ void main() async {
     }
   });
   test('with setting a key', () {
-    OpenAI.apiKey = "YOUR KEY HERE SO THE TESTS CAN RUN";
+    OpenAI.apiKey = "PUT HERE YOUR API KEY";

     expect(OpenAI.instance, isA<OpenAI>());
   });

@@ -125,7 +126,7 @@ void main() async {
       messages: [
         OpenAIChatCompletionChoiceMessageModel(
           content: "Hello, how are you?",
-          role: "user",
+          role: OpenAIChatMessageRole.user,
         ),
       ],
     );

@@ -148,7 +149,7 @@ void main() async {
       messages: [
         OpenAIChatCompletionChoiceMessageModel(
           content: "Hello, how are you?",
-          role: "user",
+          role: OpenAIChatMessageRole.user,
         ),
       ],
     );

@@ -160,17 +161,18 @@ void main() async {
     });
   });
   group('edits', () {
-    test('create', () async {
-      final OpenAIEditModel edit = await OpenAI.instance.edit.create(
-        model: "text-davinci-edit-001",
-        instruction: "remove the word 'made' from the text",
-        input: "I made something, idk man",
-      );
-      expect(edit, isA<OpenAIEditModel>());
-      expect(edit.choices.first, isA<OpenAIEditModelChoice>());
-      expect(edit.choices.first.text, isNotNull);
-      expect(edit.choices.first.text, isA<String>());
-    });
+    //! temporary disabled, because the API have on this and throws an unexpected error from OpenAI end.
+    // test('create', () async {
+    //   final OpenAIEditModel edit = await OpenAI.instance.edit.create(
+    //     model: "text-davinci-edit-001",
+    //     instruction: "remove the word 'made' from the text",
+    //     input: "I made something, idk man",
+    //   );
+    //   expect(edit, isA<OpenAIEditModel>());
+    //   expect(edit.choices.first, isA<OpenAIEditModelChoice>());
+    //   expect(edit.choices.first.text, isNotNull);
+    //   expect(edit.choices.first.text, isA<String>());
+    // });
   });
   group('images', () {
     test('create', () async {
