diff --git a/README.md b/README.md index 70199bd8..67673c50 100644 --- a/README.md +++ b/README.md @@ -219,7 +219,10 @@ Creates a completion for the chat message, note you need to set each message as OpenAIChatCompletionModel chatCompletion = await OpenAI.instance.chat.create( model: "gpt-3.5-turbo", messages: [ - OpenAIChatCompletionChoiceMessageModel(content: "hello, what is Flutter and Dart ?", role: "user"), + OpenAIChatCompletionChoiceMessageModel( + content: "hello, what is Flutter and Dart ?", + role: OpenAIChatMessageRole.user, + ), ], ); ``` @@ -234,7 +237,7 @@ OpenAIStreamChatCompletionModel chatStream = OpenAI.instance.chat.createStream( messages: [ OpenAIChatCompletionChoiceMessageModel( content: "hello", - role: "user", + role: OpenAIChatMessageRole.user, ) ], ); @@ -277,7 +280,7 @@ Generates a new image based on a prompt given. prompt: 'an astronaut on the sea', n: 1, size: OpenAIImageSize.size1024, - responseFormat: OpenAIResponseFormat.url, + responseFormat: OpenAIImageResponseFormat.url, ); ``` @@ -292,7 +295,7 @@ OpenAiImageEditModel imageEdit = await OpenAI.instance.image.edit( prompt: "mask the image with a dinosaur", n: 1, size: OpenAIImageSize.size1024, - responseFormat: OpenAIResponseFormat.url, + responseFormat: OpenAIImageResponseFormat.url, ); ``` @@ -305,7 +308,7 @@ OpenAIImageVariationModel imageVariation = await OpenAI.instance.image.variation image: File(/* IMAGE PATH HERE */), n: 1, size: OpenAIImageSize.size1024, - responseFormat: OpenAIResponseFormat.url, + responseFormat: OpenAIImageResponseFormat.url, ); ``` diff --git a/example/lib/chat_campletion_stream_example.dart b/example/lib/chat_campletion_stream_example.dart index 1af7c486..a2199536 100644 --- a/example/lib/chat_campletion_stream_example.dart +++ b/example/lib/chat_campletion_stream_example.dart @@ -12,7 +12,7 @@ void main() { messages: [ OpenAIChatCompletionChoiceMessageModel( content: "hello", - role: "user", + role: OpenAIChatMessageRole.user, ) ], ); diff --git 
a/example/lib/chat_completion_example.dart b/example/lib/chat_completion_example.dart index 8c103313..42489443 100644 --- a/example/lib/chat_completion_example.dart +++ b/example/lib/chat_completion_example.dart @@ -6,11 +6,13 @@ void main() async { // Set the OpenAI API key from the .env file. OpenAI.apiKey = Env.apiKey; - OpenAIChatCompletionModel chatCompletion = await OpenAI.instance.chat.create( - model: "gpt-3.5-turbo", - messages: [ - OpenAIChatCompletionChoiceMessageModel(content: "hello", role: "user") - ]); + OpenAIChatCompletionModel chatCompletion = + await OpenAI.instance.chat.create(model: "gpt-3.5-turbo", messages: [ + OpenAIChatCompletionChoiceMessageModel( + content: "hello", + role: OpenAIChatMessageRole.user, + ) + ]); print(chatCompletion.id); print(chatCompletion.choices.first.message); diff --git a/example/lib/image_variation_example.dart b/example/lib/image_variation_example.dart index ac7955b8..612d2dd1 100644 --- a/example/lib/image_variation_example.dart +++ b/example/lib/image_variation_example.dart @@ -13,7 +13,7 @@ Future main() async { image: File("example.png"), n: 1, size: OpenAIImageSize.size256, - responseFormat: OpenAIResponseFormat.b64Json, + responseFormat: OpenAIImageResponseFormat.b64Json, ); // Prints the result. 
diff --git a/lib/src/core/base/images/base.dart b/lib/src/core/base/images/base.dart index bd9a5950..28bdd9e2 100644 --- a/lib/src/core/base/images/base.dart +++ b/lib/src/core/base/images/base.dart @@ -25,12 +25,12 @@ extension SizeToStingExtension on OpenAIImageSize { } } -extension ResponseFormatToStingExtension on OpenAIResponseFormat { +extension ResponseFormatToStingExtension on OpenAIImageResponseFormat { String get value { switch (this) { - case OpenAIResponseFormat.url: + case OpenAIImageResponseFormat.url: return "url"; - case OpenAIResponseFormat.b64Json: + case OpenAIImageResponseFormat.b64Json: return "b64_json"; } } diff --git a/lib/src/core/base/images/interfaces/create.dart b/lib/src/core/base/images/interfaces/create.dart index 45e26ea2..459d647f 100644 --- a/lib/src/core/base/images/interfaces/create.dart +++ b/lib/src/core/base/images/interfaces/create.dart @@ -6,7 +6,7 @@ abstract class CreateInterface { required String prompt, int? n, OpenAIImageSize? size, - OpenAIResponseFormat? responseFormat, + OpenAIImageResponseFormat? responseFormat, String? user, }); } diff --git a/lib/src/core/base/images/interfaces/edit.dart b/lib/src/core/base/images/interfaces/edit.dart index 0ed73689..0cb75411 100644 --- a/lib/src/core/base/images/interfaces/edit.dart +++ b/lib/src/core/base/images/interfaces/edit.dart @@ -9,7 +9,7 @@ abstract class EditInterface { required String prompt, int? n, OpenAIImageSize? size, - OpenAIResponseFormat? responseFormat, + OpenAIImageResponseFormat? responseFormat, String? user, }); } diff --git a/lib/src/core/base/images/interfaces/variations.dart b/lib/src/core/base/images/interfaces/variations.dart index 9a44fbca..5994d4ea 100644 --- a/lib/src/core/base/images/interfaces/variations.dart +++ b/lib/src/core/base/images/interfaces/variations.dart @@ -7,7 +7,7 @@ abstract class VariationInterface { required File image, int? n, OpenAIImageSize? size, - OpenAIResponseFormat? responseFormat, + OpenAIImageResponseFormat? 
responseFormat, String? user, }); } diff --git a/lib/src/core/models/chat/sub_models/choices/sub_models/message.dart b/lib/src/core/models/chat/sub_models/choices/sub_models/message.dart index fe857a57..6344deb4 100644 --- a/lib/src/core/models/chat/sub_models/choices/sub_models/message.dart +++ b/lib/src/core/models/chat/sub_models/choices/sub_models/message.dart @@ -1,9 +1,11 @@ +import '../../../../image/enum.dart'; + /// {@template openai_chat_completion_choice_message_model} /// This represents the message of the [OpenAIChatCompletionChoiceModel] model of the OpenAI API, which is used and get returned while using the [OpenAIChat] methods. /// {@endtemplate} class OpenAIChatCompletionChoiceMessageModel { /// The [role] of the message. - final String role; + final OpenAIChatMessageRole role; /// The [content] of the message. final String content; @@ -24,7 +26,8 @@ class OpenAIChatCompletionChoiceMessageModel { Map json, ) { return OpenAIChatCompletionChoiceMessageModel( - role: json['role'], + role: OpenAIChatMessageRole.values + .firstWhere((role) => role.name == json['role']), content: json['content'], ); } @@ -32,7 +35,7 @@ class OpenAIChatCompletionChoiceMessageModel { /// This method used to convert the [OpenAIChatCompletionChoiceMessageModel] to a [Map] object. 
Map toMap() { return { - "role": role, + "role": role.name, "content": content, }; } diff --git a/lib/src/core/models/image/enum.dart b/lib/src/core/models/image/enum.dart index f7bc483a..61a4327b 100644 --- a/lib/src/core/models/image/enum.dart +++ b/lib/src/core/models/image/enum.dart @@ -1,5 +1,7 @@ enum OpenAIImageSize { size256, size512, size1024 } -enum OpenAIResponseFormat { url, b64Json } +enum OpenAIImageResponseFormat { url, b64Json } enum OpenAIAudioResponseFormat { json, text, srt, verbose_json, vtt } + +enum OpenAIChatMessageRole { system, user, assistant } diff --git a/lib/src/instance/images/images.dart b/lib/src/instance/images/images.dart index e3449715..681db8ef 100644 --- a/lib/src/instance/images/images.dart +++ b/lib/src/instance/images/images.dart @@ -42,8 +42,8 @@ class OpenAIImages implements OpenAIImagesBase { /// /// /// [responseFormat] is the format in which the generated images are returned. Must be one of : - /// - `OpenAIResponseFormat.url` - /// - `OpenAIResponseFormat.b64Json` + /// - `OpenAIImageResponseFormat.url` + /// - `OpenAIImageResponseFormat.b64Json` /// /// /// [user] is the user ID to associate with the request. This is used to prevent abuse of the API. @@ -55,7 +55,7 @@ class OpenAIImages implements OpenAIImagesBase { /// prompt: 'create an image about the sea', /// n: 1, /// size: OpenAIImageSize.size1024, - /// responseFormat: OpenAIResponseFormat.url, + /// responseFormat: OpenAIImageResponseFormat.url, /// ); ///``` @override @@ -63,7 +63,7 @@ class OpenAIImages implements OpenAIImagesBase { required String prompt, int? n, OpenAIImageSize? size, - OpenAIResponseFormat? responseFormat, + OpenAIImageResponseFormat? responseFormat, String? user, }) async { final String generations = "/generations"; @@ -100,8 +100,8 @@ class OpenAIImages implements OpenAIImagesBase { /// /// /// [responseFormat] is the format in which the generated images are returned. 
Must be one of : - /// - `OpenAIResponseFormat.url` - /// - `OpenAIResponseFormat.b64Json` + /// - `OpenAIImageResponseFormat.url` + /// - `OpenAIImageResponseFormat.b64Json` /// /// /// @@ -116,7 +116,7 @@ class OpenAIImages implements OpenAIImagesBase { /// prompt: "mask the image with a dinosaur in the image", /// n: 1, /// size: OpenAIImageSize.size1024, - /// responseFormat: OpenAIResponseFormat.url, + /// responseFormat: OpenAIImageResponseFormat.url, /// ); ///``` @override @@ -126,7 +126,7 @@ class OpenAIImages implements OpenAIImagesBase { required String prompt, int? n, OpenAIImageSize? size, - OpenAIResponseFormat? responseFormat, + OpenAIImageResponseFormat? responseFormat, String? user, }) async { final String edit = "/edits"; @@ -163,8 +163,8 @@ class OpenAIImages implements OpenAIImagesBase { /// /// /// [responseFormat] is the format in which the generated images are returned. Must be one of : - /// - `OpenAIResponseFormat.url` - /// - `OpenAIResponseFormat.b64Json` + /// - `OpenAIImageResponseFormat.url` + /// - `OpenAIImageResponseFormat.b64Json` /// /// /// [user] is the user ID to associate with the request. This is used to prevent abuse of the API. @@ -176,7 +176,7 @@ class OpenAIImages implements OpenAIImagesBase { /// image: File(/* IMAGE PATH HERE */), /// n: 1, /// size: OpenAIImageSize.size1024, - /// responseFormat: OpenAIResponseFormat.url, + /// responseFormat: OpenAIImageResponseFormat.url, /// ); /// ``` @override @@ -184,7 +184,7 @@ class OpenAIImages implements OpenAIImagesBase { required File image, int? n, OpenAIImageSize? size, - OpenAIResponseFormat? responseFormat, + OpenAIImageResponseFormat? responseFormat, String? 
user, }) async { final String variations = "/variations"; diff --git a/test/openai_test.dart b/test/openai_test.dart index 1d1f6ada..e631160c 100644 --- a/test/openai_test.dart +++ b/test/openai_test.dart @@ -5,6 +5,7 @@ import 'package:dart_openai/openai.dart'; import 'package:http/http.dart' as http; import 'package:test/test.dart'; +@Timeout(Duration(minutes: 2)) void main() async { final exampleImageFile = await getFileFromUrl( "https://upload.wikimedia.org/wikipedia/commons/7/7e/Dart-logo.png", ); @@ -34,7 +35,7 @@ void main() async { } }); test('with setting a key', () { - OpenAI.apiKey = "YOUR KEY HERE SO THE TESTS CAN RUN"; + OpenAI.apiKey = "PUT HERE YOUR API KEY"; expect(OpenAI.instance, isA()); }); @@ -125,7 +126,7 @@ void main() async { messages: [ OpenAIChatCompletionChoiceMessageModel( content: "Hello, how are you?", - role: "user", + role: OpenAIChatMessageRole.user, ), ], ); @@ -148,7 +149,7 @@ void main() async { messages: [ OpenAIChatCompletionChoiceMessageModel( content: "Hello, how are you?", - role: "user", + role: OpenAIChatMessageRole.user, ), ], ); @@ -160,17 +161,18 @@ void main() async { }); }); group('edits', () { - test('create', () async { - final OpenAIEditModel edit = await OpenAI.instance.edit.create( - model: "text-davinci-edit-001", - instruction: "remove the word 'made' from the text", - input: "I made something, idk man", - ); - expect(edit, isA()); - expect(edit.choices.first, isA()); - expect(edit.choices.first.text, isNotNull); - expect(edit.choices.first.text, isA()); - }); + //! Temporarily disabled: the API currently has an issue with this endpoint and throws an unexpected error from OpenAI's end. 
+ // test('create', () async { + // final OpenAIEditModel edit = await OpenAI.instance.edit.create( + // model: "text-davinci-edit-001", + // instruction: "remove the word 'made' from the text", + // input: "I made something, idk man", + // ); + // expect(edit, isA()); + // expect(edit.choices.first, isA()); + // expect(edit.choices.first.text, isNotNull); + // expect(edit.choices.first.text, isA()); + // }); }); group('images', () { test('create', () async {