Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit 339d315

Browse files
committed Jan 22, 2025
feat(api): update enum values, comments, and examples (#2045)
1 parent 709926f commit 339d315

20 files changed

+152
-146
lines changed
 

‎.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
11
configured_endpoints: 69
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml

‎src/openai/resources/audio/speech.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ def create(
5353
*,
5454
input: str,
5555
model: Union[str, SpeechModel],
56-
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
56+
voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
5757
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
5858
speed: float | NotGiven = NOT_GIVEN,
5959
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -73,9 +73,9 @@ def create(
7373
One of the available [TTS models](https://platform.openai.com/docs/models#tts):
7474
`tts-1` or `tts-1-hd`
7575
76-
voice: The voice to use when generating the audio. Supported voices are `alloy`,
77-
`echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
78-
available in the
76+
voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
77+
`coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
78+
voices are available in the
7979
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
8080
8181
response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
@@ -137,7 +137,7 @@ async def create(
137137
*,
138138
input: str,
139139
model: Union[str, SpeechModel],
140-
voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
140+
voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
141141
response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
142142
speed: float | NotGiven = NOT_GIVEN,
143143
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -157,9 +157,9 @@ async def create(
157157
One of the available [TTS models](https://platform.openai.com/docs/models#tts):
158158
`tts-1` or `tts-1-hd`
159159
160-
voice: The voice to use when generating the audio. Supported voices are `alloy`,
161-
`echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
162-
available in the
160+
voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
161+
`coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
162+
voices are available in the
163163
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
164164
165165
response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,

‎src/openai/resources/beta/realtime/sessions.py

Lines changed: 28 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -46,18 +46,19 @@ def with_streaming_response(self) -> SessionsWithStreamingResponse:
4646
def create(
4747
self,
4848
*,
49+
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
50+
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
51+
instructions: str | NotGiven = NOT_GIVEN,
52+
max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
53+
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
4954
model: Literal[
5055
"gpt-4o-realtime-preview",
5156
"gpt-4o-realtime-preview-2024-10-01",
5257
"gpt-4o-realtime-preview-2024-12-17",
5358
"gpt-4o-mini-realtime-preview",
5459
"gpt-4o-mini-realtime-preview-2024-12-17",
55-
],
56-
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
57-
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
58-
instructions: str | NotGiven = NOT_GIVEN,
59-
max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
60-
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
60+
]
61+
| NotGiven = NOT_GIVEN,
6162
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
6263
temperature: float | NotGiven = NOT_GIVEN,
6364
tool_choice: str | NotGiven = NOT_GIVEN,
@@ -81,9 +82,9 @@ def create(
8182
the Realtime API.
8283
8384
Args:
84-
model: The Realtime model used for this session.
85-
86-
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
85+
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
86+
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
87+
(mono), and little-endian byte order.
8788
8889
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
8990
`null` to turn off once on. Input audio transcription is not native to the
@@ -110,7 +111,10 @@ def create(
110111
modalities: The set of modalities the model can respond with. To disable audio, set this to
111112
["text"].
112113
114+
model: The Realtime model used for this session.
115+
113116
output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
117+
For `pcm16`, output audio is sampled at a rate of 24kHz.
114118
115119
temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
116120
@@ -140,12 +144,12 @@ def create(
140144
"/realtime/sessions",
141145
body=maybe_transform(
142146
{
143-
"model": model,
144147
"input_audio_format": input_audio_format,
145148
"input_audio_transcription": input_audio_transcription,
146149
"instructions": instructions,
147150
"max_response_output_tokens": max_response_output_tokens,
148151
"modalities": modalities,
152+
"model": model,
149153
"output_audio_format": output_audio_format,
150154
"temperature": temperature,
151155
"tool_choice": tool_choice,
@@ -185,18 +189,19 @@ def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
185189
async def create(
186190
self,
187191
*,
192+
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
193+
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
194+
instructions: str | NotGiven = NOT_GIVEN,
195+
max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
196+
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
188197
model: Literal[
189198
"gpt-4o-realtime-preview",
190199
"gpt-4o-realtime-preview-2024-10-01",
191200
"gpt-4o-realtime-preview-2024-12-17",
192201
"gpt-4o-mini-realtime-preview",
193202
"gpt-4o-mini-realtime-preview-2024-12-17",
194-
],
195-
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
196-
input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
197-
instructions: str | NotGiven = NOT_GIVEN,
198-
max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
199-
modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
203+
]
204+
| NotGiven = NOT_GIVEN,
200205
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
201206
temperature: float | NotGiven = NOT_GIVEN,
202207
tool_choice: str | NotGiven = NOT_GIVEN,
@@ -220,9 +225,9 @@ async def create(
220225
the Realtime API.
221226
222227
Args:
223-
model: The Realtime model used for this session.
224-
225-
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
228+
input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
229+
`pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
230+
(mono), and little-endian byte order.
226231
227232
input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
228233
`null` to turn off once on. Input audio transcription is not native to the
@@ -249,7 +254,10 @@ async def create(
249254
modalities: The set of modalities the model can respond with. To disable audio, set this to
250255
["text"].
251256
257+
model: The Realtime model used for this session.
258+
252259
output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
260+
For `pcm16`, output audio is sampled at a rate of 24kHz.
253261
254262
temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
255263
@@ -279,12 +287,12 @@ async def create(
279287
"/realtime/sessions",
280288
body=await async_maybe_transform(
281289
{
282-
"model": model,
283290
"input_audio_format": input_audio_format,
284291
"input_audio_transcription": input_audio_transcription,
285292
"instructions": instructions,
286293
"max_response_output_tokens": max_response_output_tokens,
287294
"modalities": modalities,
295+
"model": model,
288296
"output_audio_format": output_audio_format,
289297
"temperature": temperature,
290298
"tool_choice": tool_choice,

‎src/openai/resources/chat/completions.py

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -251,9 +251,6 @@ def create(
251251
tier with a lower uptime SLA and no latency guarentee.
252252
- When not set, the default behavior is 'auto'.
253253
254-
When this parameter is set, the response body will include the `service_tier`
255-
utilized.
256-
257254
stop: Up to 4 sequences where the API will stop generating further tokens.
258255
259256
store: Whether or not to store the output of this chat completion request for use in
@@ -509,9 +506,6 @@ def create(
509506
tier with a lower uptime SLA and no latency guarentee.
510507
- When not set, the default behavior is 'auto'.
511508
512-
When this parameter is set, the response body will include the `service_tier`
513-
utilized.
514-
515509
stop: Up to 4 sequences where the API will stop generating further tokens.
516510
517511
store: Whether or not to store the output of this chat completion request for use in
@@ -760,9 +754,6 @@ def create(
760754
tier with a lower uptime SLA and no latency guarentee.
761755
- When not set, the default behavior is 'auto'.
762756
763-
When this parameter is set, the response body will include the `service_tier`
764-
utilized.
765-
766757
stop: Up to 4 sequences where the API will stop generating further tokens.
767758
768759
store: Whether or not to store the output of this chat completion request for use in
@@ -1112,9 +1103,6 @@ async def create(
11121103
tier with a lower uptime SLA and no latency guarentee.
11131104
- When not set, the default behavior is 'auto'.
11141105
1115-
When this parameter is set, the response body will include the `service_tier`
1116-
utilized.
1117-
11181106
stop: Up to 4 sequences where the API will stop generating further tokens.
11191107
11201108
store: Whether or not to store the output of this chat completion request for use in
@@ -1370,9 +1358,6 @@ async def create(
13701358
tier with a lower uptime SLA and no latency guarentee.
13711359
- When not set, the default behavior is 'auto'.
13721360
1373-
When this parameter is set, the response body will include the `service_tier`
1374-
utilized.
1375-
13761361
stop: Up to 4 sequences where the API will stop generating further tokens.
13771362
13781363
store: Whether or not to store the output of this chat completion request for use in
@@ -1621,9 +1606,6 @@ async def create(
16211606
tier with a lower uptime SLA and no latency guarentee.
16221607
- When not set, the default behavior is 'auto'.
16231608
1624-
When this parameter is set, the response body will include the `service_tier`
1625-
utilized.
1626-
16271609
stop: Up to 4 sequences where the API will stop generating further tokens.
16281610
16291611
store: Whether or not to store the output of this chat completion request for use in

‎src/openai/resources/embeddings.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,8 @@ def create(
6868
`text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
6969
dimensions or less.
7070
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
71-
for counting tokens.
71+
for counting tokens. Some models may also impose a limit on total number of
72+
tokens summed across inputs.
7273
7374
model: ID of the model to use. You can use the
7475
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -180,7 +181,8 @@ async def create(
180181
`text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
181182
dimensions or less.
182183
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
183-
for counting tokens.
184+
for counting tokens. Some models may also impose a limit on total number of
185+
tokens summed across inputs.
184186
185187
model: ID of the model to use. You can use the
186188
[List models](https://platform.openai.com/docs/api-reference/models/list) API to

‎src/openai/types/audio/speech_create_params.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,11 +20,11 @@ class SpeechCreateParams(TypedDict, total=False):
2020
`tts-1` or `tts-1-hd`
2121
"""
2222

23-
voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]]
23+
voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]]
2424
"""The voice to use when generating the audio.
2525
26-
Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
27-
Previews of the voices are available in the
26+
Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`,
27+
`sage` and `shimmer`. Previews of the voices are available in the
2828
[Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
2929
"""
3030

‎src/openai/types/beta/realtime/conversation_item_create_event.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,10 @@ class ConversationItemCreateEvent(BaseModel):
2020
"""Optional client-generated ID used to identify this event."""
2121

2222
previous_item_id: Optional[str] = None
23-
"""The ID of the preceding item after which the new item will be inserted.
24-
25-
If not set, the new item will be appended to the end of the conversation. If
26-
set, it allows an item to be inserted mid-conversation. If the ID cannot be
27-
found, an error will be returned and the item will not be added.
23+
"""
24+
The ID of the preceding item after which the new item will be inserted. If not
25+
set, the new item will be appended to the end of the conversation. If set to
26+
`root`, the new item will be added to the beginning of the conversation. If set
27+
to an existing ID, it allows an item to be inserted mid-conversation. If the ID
28+
cannot be found, an error will be returned and the item will not be added.
2829
"""

‎src/openai/types/beta/realtime/conversation_item_create_event_param.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False):
2020
"""Optional client-generated ID used to identify this event."""
2121

2222
previous_item_id: str
23-
"""The ID of the preceding item after which the new item will be inserted.
24-
25-
If not set, the new item will be appended to the end of the conversation. If
26-
set, it allows an item to be inserted mid-conversation. If the ID cannot be
27-
found, an error will be returned and the item will not be added.
23+
"""
24+
The ID of the preceding item after which the new item will be inserted. If not
25+
set, the new item will be appended to the end of the conversation. If set to
26+
`root`, the new item will be added to the beginning of the conversation. If set
27+
to an existing ID, it allows an item to be inserted mid-conversation. If the ID
28+
cannot be found, an error will be returned and the item will not be added.
2829
"""

‎src/openai/types/beta/realtime/session.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,12 @@ class Session(BaseModel):
6363
"""Unique identifier for the session object."""
6464

6565
input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
66-
"""The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
66+
"""The format of input audio.
67+
68+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
69+
be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
70+
byte order.
71+
"""
6772

6873
input_audio_transcription: Optional[InputAudioTranscription] = None
6974
"""
@@ -117,7 +122,11 @@ class Session(BaseModel):
117122
"""The Realtime model used for this session."""
118123

119124
output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
120-
"""The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
125+
"""The format of output audio.
126+
127+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
128+
sampled at a rate of 24kHz.
129+
"""
121130

122131
temperature: Optional[float] = None
123132
"""Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""

‎src/openai/types/beta/realtime/session_create_params.py

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -3,25 +3,19 @@
33
from __future__ import annotations
44

55
from typing import List, Union, Iterable
6-
from typing_extensions import Literal, Required, TypedDict
6+
from typing_extensions import Literal, TypedDict
77

88
__all__ = ["SessionCreateParams", "InputAudioTranscription", "Tool", "TurnDetection"]
99

1010

1111
class SessionCreateParams(TypedDict, total=False):
12-
model: Required[
13-
Literal[
14-
"gpt-4o-realtime-preview",
15-
"gpt-4o-realtime-preview-2024-10-01",
16-
"gpt-4o-realtime-preview-2024-12-17",
17-
"gpt-4o-mini-realtime-preview",
18-
"gpt-4o-mini-realtime-preview-2024-12-17",
19-
]
20-
]
21-
"""The Realtime model used for this session."""
22-
2312
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
24-
"""The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
13+
"""The format of input audio.
14+
15+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
16+
be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
17+
byte order.
18+
"""
2519

2620
input_audio_transcription: InputAudioTranscription
2721
"""
@@ -61,8 +55,21 @@ class SessionCreateParams(TypedDict, total=False):
6155
To disable audio, set this to ["text"].
6256
"""
6357

58+
model: Literal[
59+
"gpt-4o-realtime-preview",
60+
"gpt-4o-realtime-preview-2024-10-01",
61+
"gpt-4o-realtime-preview-2024-12-17",
62+
"gpt-4o-mini-realtime-preview",
63+
"gpt-4o-mini-realtime-preview-2024-12-17",
64+
]
65+
"""The Realtime model used for this session."""
66+
6467
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
65-
"""The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
68+
"""The format of output audio.
69+
70+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
71+
sampled at a rate of 24kHz.
72+
"""
6673

6774
temperature: float
6875
"""Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""

‎src/openai/types/beta/realtime/session_update_event.py

Lines changed: 22 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -65,17 +65,13 @@ class SessionTurnDetection(BaseModel):
6565

6666

6767
class Session(BaseModel):
68-
model: Literal[
69-
"gpt-4o-realtime-preview",
70-
"gpt-4o-realtime-preview-2024-10-01",
71-
"gpt-4o-realtime-preview-2024-12-17",
72-
"gpt-4o-mini-realtime-preview",
73-
"gpt-4o-mini-realtime-preview-2024-12-17",
74-
]
75-
"""The Realtime model used for this session."""
76-
7768
input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
78-
"""The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
69+
"""The format of input audio.
70+
71+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
72+
be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
73+
byte order.
74+
"""
7975

8076
input_audio_transcription: Optional[SessionInputAudioTranscription] = None
8177
"""
@@ -115,8 +111,23 @@ class Session(BaseModel):
115111
To disable audio, set this to ["text"].
116112
"""
117113

114+
model: Optional[
115+
Literal[
116+
"gpt-4o-realtime-preview",
117+
"gpt-4o-realtime-preview-2024-10-01",
118+
"gpt-4o-realtime-preview-2024-12-17",
119+
"gpt-4o-mini-realtime-preview",
120+
"gpt-4o-mini-realtime-preview-2024-12-17",
121+
]
122+
] = None
123+
"""The Realtime model used for this session."""
124+
118125
output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None
119-
"""The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
126+
"""The format of output audio.
127+
128+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
129+
sampled at a rate of 24kHz.
130+
"""
120131

121132
temperature: Optional[float] = None
122133
"""Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""

‎src/openai/types/beta/realtime/session_update_event_param.py

Lines changed: 20 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -71,19 +71,13 @@ class SessionTurnDetection(TypedDict, total=False):
7171

7272

7373
class Session(TypedDict, total=False):
74-
model: Required[
75-
Literal[
76-
"gpt-4o-realtime-preview",
77-
"gpt-4o-realtime-preview-2024-10-01",
78-
"gpt-4o-realtime-preview-2024-12-17",
79-
"gpt-4o-mini-realtime-preview",
80-
"gpt-4o-mini-realtime-preview-2024-12-17",
81-
]
82-
]
83-
"""The Realtime model used for this session."""
84-
8574
input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
86-
"""The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
75+
"""The format of input audio.
76+
77+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must
78+
be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian
79+
byte order.
80+
"""
8781

8882
input_audio_transcription: SessionInputAudioTranscription
8983
"""
@@ -123,8 +117,21 @@ class Session(TypedDict, total=False):
123117
To disable audio, set this to ["text"].
124118
"""
125119

120+
model: Literal[
121+
"gpt-4o-realtime-preview",
122+
"gpt-4o-realtime-preview-2024-10-01",
123+
"gpt-4o-realtime-preview-2024-12-17",
124+
"gpt-4o-mini-realtime-preview",
125+
"gpt-4o-mini-realtime-preview-2024-12-17",
126+
]
127+
"""The Realtime model used for this session."""
128+
126129
output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"]
127-
"""The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`."""
130+
"""The format of output audio.
131+
132+
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is
133+
sampled at a rate of 24kHz.
134+
"""
128135

129136
temperature: float
130137
"""Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8."""

‎src/openai/types/chat/chat_completion.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -60,11 +60,7 @@ class ChatCompletion(BaseModel):
6060
"""The object type, which is always `chat.completion`."""
6161

6262
service_tier: Optional[Literal["scale", "default"]] = None
63-
"""The service tier used for processing the request.
64-
65-
This field is only included if the `service_tier` parameter is specified in the
66-
request.
67-
"""
63+
"""The service tier used for processing the request."""
6864

6965
system_fingerprint: Optional[str] = None
7066
"""This fingerprint represents the backend configuration that the model runs with.

‎src/openai/types/chat/chat_completion_assistant_message_param.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False):
3838
"""The role of the messages author, in this case `assistant`."""
3939

4040
audio: Optional[Audio]
41-
"""Data about a previous audio response from the model.
42-
41+
"""
42+
Data about a previous audio response from the model.
4343
[Learn more](https://platform.openai.com/docs/guides/audio).
4444
"""
4545

‎src/openai/types/chat/chat_completion_chunk.py

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -129,11 +129,7 @@ class ChatCompletionChunk(BaseModel):
129129
"""The object type, which is always `chat.completion.chunk`."""
130130

131131
service_tier: Optional[Literal["scale", "default"]] = None
132-
"""The service tier used for processing the request.
133-
134-
This field is only included if the `service_tier` parameter is specified in the
135-
request.
136-
"""
132+
"""The service tier used for processing the request."""
137133

138134
system_fingerprint: Optional[str] = None
139135
"""

‎src/openai/types/chat/completion_create_params.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -220,9 +220,6 @@ class CompletionCreateParamsBase(TypedDict, total=False):
220220
- If set to 'default', the request will be processed using the default service
221221
tier with a lower uptime SLA and no latency guarentee.
222222
- When not set, the default behavior is 'auto'.
223-
224-
When this parameter is set, the response body will include the `service_tier`
225-
utilized.
226223
"""
227224

228225
stop: Union[Optional[str], List[str]]

‎src/openai/types/embedding_create_params.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,8 @@ class EmbeddingCreateParams(TypedDict, total=False):
1919
(8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any
2020
array must be 2048 dimensions or less.
2121
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
22-
for counting tokens.
22+
for counting tokens. Some models may also impose a limit on total number of
23+
tokens summed across inputs.
2324
"""
2425

2526
model: Required[Union[str, EmbeddingModel]]

‎tests/api_resources/beta/realtime/test_sessions.py

Lines changed: 8 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -19,20 +19,18 @@ class TestSessions:
1919

2020
@parametrize
2121
def test_method_create(self, client: OpenAI) -> None:
22-
session = client.beta.realtime.sessions.create(
23-
model="gpt-4o-realtime-preview",
24-
)
22+
session = client.beta.realtime.sessions.create()
2523
assert_matches_type(SessionCreateResponse, session, path=["response"])
2624

2725
@parametrize
2826
def test_method_create_with_all_params(self, client: OpenAI) -> None:
2927
session = client.beta.realtime.sessions.create(
30-
model="gpt-4o-realtime-preview",
3128
input_audio_format="pcm16",
3229
input_audio_transcription={"model": "model"},
3330
instructions="instructions",
3431
max_response_output_tokens=0,
3532
modalities=["text"],
33+
model="gpt-4o-realtime-preview",
3634
output_audio_format="pcm16",
3735
temperature=0,
3836
tool_choice="tool_choice",
@@ -57,9 +55,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
5755

5856
@parametrize
5957
def test_raw_response_create(self, client: OpenAI) -> None:
60-
response = client.beta.realtime.sessions.with_raw_response.create(
61-
model="gpt-4o-realtime-preview",
62-
)
58+
response = client.beta.realtime.sessions.with_raw_response.create()
6359

6460
assert response.is_closed is True
6561
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -68,9 +64,7 @@ def test_raw_response_create(self, client: OpenAI) -> None:
6864

6965
@parametrize
7066
def test_streaming_response_create(self, client: OpenAI) -> None:
71-
with client.beta.realtime.sessions.with_streaming_response.create(
72-
model="gpt-4o-realtime-preview",
73-
) as response:
67+
with client.beta.realtime.sessions.with_streaming_response.create() as response:
7468
assert not response.is_closed
7569
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
7670

@@ -85,20 +79,18 @@ class TestAsyncSessions:
8579

8680
@parametrize
8781
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
88-
session = await async_client.beta.realtime.sessions.create(
89-
model="gpt-4o-realtime-preview",
90-
)
82+
session = await async_client.beta.realtime.sessions.create()
9183
assert_matches_type(SessionCreateResponse, session, path=["response"])
9284

9385
@parametrize
9486
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
9587
session = await async_client.beta.realtime.sessions.create(
96-
model="gpt-4o-realtime-preview",
9788
input_audio_format="pcm16",
9889
input_audio_transcription={"model": "model"},
9990
instructions="instructions",
10091
max_response_output_tokens=0,
10192
modalities=["text"],
93+
model="gpt-4o-realtime-preview",
10294
output_audio_format="pcm16",
10395
temperature=0,
10496
tool_choice="tool_choice",
@@ -123,9 +115,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
123115

124116
@parametrize
125117
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
126-
response = await async_client.beta.realtime.sessions.with_raw_response.create(
127-
model="gpt-4o-realtime-preview",
128-
)
118+
response = await async_client.beta.realtime.sessions.with_raw_response.create()
129119

130120
assert response.is_closed is True
131121
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -134,9 +124,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
134124

135125
@parametrize
136126
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
137-
async with async_client.beta.realtime.sessions.with_streaming_response.create(
138-
model="gpt-4o-realtime-preview",
139-
) as response:
127+
async with async_client.beta.realtime.sessions.with_streaming_response.create() as response:
140128
assert not response.is_closed
141129
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
142130

‎tests/api_resources/chat/test_completions.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
7272
presence_penalty=-2,
7373
reasoning_effort="low",
7474
response_format={"type": "text"},
75-
seed=-9007199254740991,
75+
seed=0,
7676
service_tier="auto",
7777
stop="string",
7878
store=True,
@@ -187,7 +187,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
187187
presence_penalty=-2,
188188
reasoning_effort="low",
189189
response_format={"type": "text"},
190-
seed=-9007199254740991,
190+
seed=0,
191191
service_tier="auto",
192192
stop="string",
193193
store=True,
@@ -321,7 +321,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
321321
presence_penalty=-2,
322322
reasoning_effort="low",
323323
response_format={"type": "text"},
324-
seed=-9007199254740991,
324+
seed=0,
325325
service_tier="auto",
326326
stop="string",
327327
store=True,
@@ -436,7 +436,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
436436
presence_penalty=-2,
437437
reasoning_effort="low",
438438
response_format={"type": "text"},
439-
seed=-9007199254740991,
439+
seed=0,
440440
service_tier="auto",
441441
stop="string",
442442
store=True,

‎tests/api_resources/test_completions.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
3838
max_tokens=16,
3939
n=1,
4040
presence_penalty=-2,
41-
seed=-9007199254740991,
41+
seed=0,
4242
stop="\n",
4343
stream=False,
4444
stream_options={"include_usage": True},
@@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
9898
max_tokens=16,
9999
n=1,
100100
presence_penalty=-2,
101-
seed=-9007199254740991,
101+
seed=0,
102102
stop="\n",
103103
stream_options={"include_usage": True},
104104
suffix="test.",
@@ -160,7 +160,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
160160
max_tokens=16,
161161
n=1,
162162
presence_penalty=-2,
163-
seed=-9007199254740991,
163+
seed=0,
164164
stop="\n",
165165
stream=False,
166166
stream_options={"include_usage": True},
@@ -220,7 +220,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
220220
max_tokens=16,
221221
n=1,
222222
presence_penalty=-2,
223-
seed=-9007199254740991,
223+
seed=0,
224224
stop="\n",
225225
stream_options={"include_usage": True},
226226
suffix="test.",

0 commit comments

Comments (0)
Please sign in to comment.