
Commit 9552c35

[ Add ] added enum for audio response format for better dev experience

1 parent: cfae703

6 files changed (+26, -8)
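
At every call site, this change swaps a raw `responseFormat` string for a typed enum member, so an invalid format becomes an analyzer error instead of a runtime API error. A minimal before/after sketch, assuming you already have an audio `File` and that the package's public import path is `package:dart_openai/openai.dart` (inferred from the internal `import '../../../openai.dart';` in this commit; not confirmed by the diff itself):

```dart
import 'dart:io';

import 'package:dart_openai/openai.dart';

Future<void> transcribe(File audioFile) async {
  // Before this commit, the format was a typo-prone raw string:
  //   responseFormat: "json",
  // After this commit, it is a typed enum member:
  final transcription = await OpenAI.instance.audio.createTranscription(
    file: audioFile,
    model: "whisper-1",
    responseFormat: OpenAIAudioResponseFormat.json,
  );
  print(transcription.text);
}
```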

README.md

Lines changed: 3 additions & 0 deletions

````diff
@@ -340,6 +340,7 @@ for transcribing an audio `File`, you can use the `createTranscription()` method
 OpenAIAudioModel transcription = OpenAI.instance.audio.createTranscription(
   file: /* THE AUDIO FILE HERE */,
   model: "whisper-1",
+  responseFormat: OpenAIAudioResponseFormat.json,
 );
 ```

@@ -351,6 +352,8 @@ to get access to the translation API, and translate an audio file to english,
 OpenAIAudioModel translation = await OpenAI.instance.audio.createTranslation(
   file: /* THE AUDIO FILE HERE */,
   model: "whisper-1",
+  responseFormat: OpenAIAudioResponseFormat.text,
+
 );
 ```
````

example/lib/create_audio_translation.dart

Lines changed: 1 addition & 1 deletion

```diff
@@ -16,7 +16,7 @@ Future<void> main() async {
       'https://www.cbvoiceovers.com/wp-content/uploads/2017/05/Commercial-showreel.mp3',
       fileExtension: "mp3"),
     model: "whisper-1",
-    responseFormat: "json",
+    responseFormat: OpenAIAudioResponseFormat.json,
   );

   // print the translation.
```

lib/src/core/base/audio/interfaces.dart

Lines changed: 2 additions & 2 deletions

```diff
@@ -7,7 +7,7 @@ abstract class CreateInterface {
     required File file,
     required String model,
     String? prompt,
-    String? responseFormat,
+    OpenAIAudioResponseFormat? responseFormat,
     double? temperature,
     String? language,
   });
@@ -16,7 +16,7 @@ abstract class CreateInterface {
     required File file,
     required String model,
     String? prompt,
-    String? responseFormat,
+    OpenAIAudioResponseFormat? responseFormat,
     double? temperature,
   });
 }
```
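
Because both abstract signatures changed, this is a breaking change for any external implementer of `CreateInterface`: overrides must adopt the enum parameter too. A hedged stub sketch (method names and return types inferred from `lib/src/instance/audio/audio.dart` below; `FakeAudioApi` is hypothetical):

```dart
// Hypothetical conforming implementation after this commit.
class FakeAudioApi implements CreateInterface {
  @override
  Future<OpenAIAudioModel> createTranscription({
    required File file,
    required String model,
    String? prompt,
    OpenAIAudioResponseFormat? responseFormat, // was String?
    double? temperature,
    String? language,
  }) async =>
      throw UnimplementedError();

  @override
  Future<OpenAIAudioModel> createTranslation({
    required File file,
    required String model,
    String? prompt,
    OpenAIAudioResponseFormat? responseFormat, // was String?
    double? temperature,
  }) async =>
      throw UnimplementedError();
}
```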
Lines changed: 2 additions & 0 deletions

```diff
@@ -1,3 +1,5 @@
 enum OpenAIImageSize { size256, size512, size1024 }

 enum OpenAIResponseFormat { url, b64Json }
+
+enum OpenAIAudioResponseFormat { json, text, srt, verbose_json, vtt }
```
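
Dart note: `Enum.name` (available since Dart 2.15) returns the member's identifier exactly as declared, which is presumably why the new member keeps the snake_case spelling `verbose_json`: its `.name` then matches the wire value the API expects for `response_format`. A tiny self-contained sketch:

```dart
enum OpenAIAudioResponseFormat { json, text, srt, verbose_json, vtt }

void main() {
  // `.name` yields the identifier verbatim, so no manual mapping is needed.
  print(OpenAIAudioResponseFormat.verbose_json.name); // prints: verbose_json
  print(OpenAIAudioResponseFormat.json.name); // prints: json
}
```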

lib/src/instance/audio/audio.dart

Lines changed: 17 additions & 4 deletions

```diff
@@ -1,21 +1,34 @@
+import 'dart:math';
+
 import 'package:dart_openai/src/core/builder/base_api_url.dart';
 import 'package:dart_openai/src/core/models/audio/audio.dart';
 import 'package:dart_openai/src/core/networking/client.dart';

 import 'dart:io';

+import '../../../openai.dart';
 import '../../core/base/audio/audio.dart';
+import '../../core/utils/logger.dart';

+/// {@template openai_audio}
+/// This class is responsible for handling all audio requests, such as creating a transcription or translation for a given audio file.
+/// {@endtemplate}
 class OpenAIAudio implements OpenAIAudioBase {
   @override
   String get endpoint => "/audio";

+  /// {@macro openai_audio}
+  OpenAIAudio() {
+    OpenAILogger.logEndpoint(endpoint);
+  }
+
+
   @override
   Future<OpenAIAudioModel> createTranscription({
     required File file,
     required String model,
     String? prompt,
-    String? responseFormat,
+    OpenAIAudioResponseFormat? responseFormat,
     double? temperature,
     String? language,
   }) async {
@@ -25,7 +38,7 @@ class OpenAIAudio implements OpenAIAudioBase {
       body: {
         "model": model,
         if (prompt != null) "prompt": prompt,
-        if (responseFormat != null) "response_format": responseFormat,
+        if (responseFormat != null) "response_format": responseFormat.name,
         if (temperature != null) "temperature": temperature.toString(),
         if (language != null) "language": language,
       },
@@ -40,7 +53,7 @@ class OpenAIAudio implements OpenAIAudioBase {
     required File file,
     required String model,
     String? prompt,
-    String? responseFormat,
+    OpenAIAudioResponseFormat? responseFormat,
     double? temperature,
   }) async {
     return await OpenAINetworkingClient.fileUpload(
@@ -49,7 +62,7 @@ class OpenAIAudio implements OpenAIAudioBase {
       body: {
         "model": model,
         if (prompt != null) "prompt": prompt,
-        if (responseFormat != null) "response_format": responseFormat,
+        if (responseFormat != null) "response_format": responseFormat.name,
         if (temperature != null) "temperature": temperature.toString(),
       },
       onSuccess: (Map<String, dynamic> response) {
```
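
The request body keeps the same optional-field pattern; only the value changes from the raw string to `responseFormat.name`. A standalone reproduction of that pattern using Dart's collection-`if` (a hypothetical helper, not part of the library; it assumes the `OpenAIAudioResponseFormat` enum from this commit is in scope):

```dart
// Hypothetical helper mirroring the body-building pattern in the diff above.
Map<String, String> buildAudioRequestBody({
  required String model,
  String? prompt,
  OpenAIAudioResponseFormat? responseFormat,
  double? temperature,
}) {
  return {
    "model": model,
    // Collection-if: each field is included only when a value was supplied.
    if (prompt != null) "prompt": prompt,
    if (responseFormat != null) "response_format": responseFormat.name,
    if (temperature != null) "temperature": temperature.toString(),
  };
}
```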

test/openai_test.dart

Lines changed: 1 addition & 1 deletion

```diff
@@ -219,7 +219,7 @@ void main() async {
   final transcription = await OpenAI.instance.audio.createTranscription(
     file: audioExampleFile,
     model: "whisper-1",
-    responseFormat: "json",
+    responseFormat: OpenAIAudioResponseFormat.json,
   );

   expect(transcription, isA<OpenAIAudioModel>());
```
