From d76a748f606743407f94dfc26758095560e2082a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 13 Jan 2025 16:20:02 +0000
Subject: [PATCH 01/11] chore(internal): streaming refactors (#2012)

---
 src/openai/_streaming.py | 66 +++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 34 deletions(-)

diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py
index 0fda992cff..b275986a46 100644
--- a/src/openai/_streaming.py
+++ b/src/openai/_streaming.py
@@ -59,23 +59,22 @@ def __stream__(self) -> Iterator[_T]:
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None:
-                data = sse.json()
-                if is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
-
-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
-
-                yield process_data(data=data, cast_to=cast_to, response=response)
+            data = sse.json()
+            if is_mapping(data) and data.get("error"):
+                message = None
+                error = data.get("error")
+                if is_mapping(error):
+                    message = error.get("message")
+                if not message or not isinstance(message, str):
+                    message = "An error occurred during streaming"
+
+                raise APIError(
+                    message=message,
+                    request=self.response.request,
+                    body=data["error"],
+                )
+
+            yield process_data(data=data, cast_to=cast_to, response=response)
 
             else:
                 data = sse.json()
@@ -161,23 +160,22 @@ async def __stream__(self) -> AsyncIterator[_T]:
             if sse.data.startswith("[DONE]"):
                 break
 
-            if sse.event is None:
-                data = sse.json()
-                if is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
-
-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
-
-                yield process_data(data=data, cast_to=cast_to, response=response)
+            data = sse.json()
+            if is_mapping(data) and data.get("error"):
+                message = None
+                error = data.get("error")
+                if is_mapping(error):
+                    message = error.get("message")
+                if not message or not isinstance(message, str):
+                    message = "An error occurred during streaming"
+
+                raise APIError(
+                    message=message,
+                    request=self.response.request,
+                    body=data["error"],
+                )
+
+            yield process_data(data=data, cast_to=cast_to, response=response)
 
             else:
                 data = sse.json()
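
For context, the hunks above drop the `sse.event is None` branch so that every `data:` payload is checked for an embedded error object before being yielded. A minimal standalone sketch of that check (hypothetical `raise_if_error` helper; the SDK's `is_mapping` approximated with `isinstance`):

```py
import httpx

from openai import APIError


def raise_if_error(data: object, request: httpx.Request) -> None:
    # Any mapping payload carrying an "error" key aborts the stream.
    if isinstance(data, dict) and data.get("error"):
        error = data.get("error")
        message = error.get("message") if isinstance(error, dict) else None
        if not message or not isinstance(message, str):
            # Fall back to a generic message when the server omits one.
            message = "An error occurred during streaming"
        raise APIError(message=message, request=request, body=data["error"])
```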

From c16f58ead0bc85055b164182689ba74b7e939dfa Mon Sep 17 00:00:00 2001
From: Robert Craigie <robert@craigie.dev>
Date: Mon, 13 Jan 2025 16:59:43 +0000
Subject: [PATCH 02/11] fix: streaming

Remove the dangling `else:` blocks left behind by the streaming refactor
(#2012): its hunks dropped the `if sse.event is None:` line but kept the
matching `else:`, leaving `_streaming.py` with invalid syntax.
---
 src/openai/_streaming.py | 38 --------------------------------------
 1 file changed, 38 deletions(-)

diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py
index b275986a46..7aa7b62f6b 100644
--- a/src/openai/_streaming.py
+++ b/src/openai/_streaming.py
@@ -76,25 +76,6 @@ def __stream__(self) -> Iterator[_T]:
 
             yield process_data(data=data, cast_to=cast_to, response=response)
 
-            else:
-                data = sse.json()
-
-                if sse.event == "error" and is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
-
-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
-
-                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
-
         # Ensure the entire stream is consumed
         for _sse in iterator:
             ...
@@ -177,25 +158,6 @@ async def __stream__(self) -> AsyncIterator[_T]:
 
             yield process_data(data=data, cast_to=cast_to, response=response)
 
-            else:
-                data = sse.json()
-
-                if sse.event == "error" and is_mapping(data) and data.get("error"):
-                    message = None
-                    error = data.get("error")
-                    if is_mapping(error):
-                        message = error.get("message")
-                    if not message or not isinstance(message, str):
-                        message = "An error occurred during streaming"
-
-                    raise APIError(
-                        message=message,
-                        request=self.response.request,
-                        body=data["error"],
-                    )
-
-                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
-
         # Ensure the entire stream is consumed
         async for _sse in iterator:
             ...

From 84f2f9c0439229a7db7136fe78419292d34d1f81 Mon Sep 17 00:00:00 2001
From: Krista Pratico <krpratic@microsoft.com>
Date: Mon, 13 Jan 2025 10:08:10 -0800
Subject: [PATCH 03/11] docs(examples/azure): example script with realtime API
 (#1967)

---
 examples/realtime/azure_realtime.py | 57 +++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)
 create mode 100644 examples/realtime/azure_realtime.py

diff --git a/examples/realtime/azure_realtime.py b/examples/realtime/azure_realtime.py
new file mode 100644
index 0000000000..de88d47052
--- /dev/null
+++ b/examples/realtime/azure_realtime.py
@@ -0,0 +1,57 @@
+import os
+import asyncio
+
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
+
+from openai import AsyncAzureOpenAI
+
+# Azure OpenAI Realtime Docs
+
+# How-to: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio
+# Supported models and API versions: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio#supported-models
+# Entra ID auth: https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity
+
+
+async def main() -> None:
+    """The following example demonstrates how to configure Azure OpenAI to use the Realtime API.
+    For an audio example, see push_to_talk_app.py and update the client and model parameters accordingly.
+
+    When prompted for user input, type a message and hit enter to send it to the model.
+    Enter "q" to quit the conversation.
+    """
+
+    credential = DefaultAzureCredential()
+    client = AsyncAzureOpenAI(
+        azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
+        azure_ad_token_provider=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default"),
+        api_version="2024-10-01-preview",
+    )
+    async with client.beta.realtime.connect(
+        model="gpt-4o-realtime-preview",  # deployment name for your model
+    ) as connection:
+        await connection.session.update(session={"modalities": ["text"]})  # type: ignore
+        while True:
+            user_input = input("Enter a message: ")
+            if user_input == "q":
+                break
+
+            await connection.conversation.item.create(
+                item={
+                    "type": "message",
+                    "role": "user",
+                    "content": [{"type": "input_text", "text": user_input}],
+                }
+            )
+            await connection.response.create()
+            async for event in connection:
+                if event.type == "response.text.delta":
+                    print(event.delta, flush=True, end="")
+                elif event.type == "response.text.done":
+                    print()
+                elif event.type == "response.done":
+                    break
+
+    await credential.close()
+
+
+asyncio.run(main())
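
The script above authenticates with Entra ID; a key-based variant is a one-line swap on the client constructor (a sketch assuming an `AZURE_OPENAI_API_KEY` environment variable):

```py
client = AsyncAzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_key=os.environ["AZURE_OPENAI_API_KEY"],  # instead of azure_ad_token_provider
    api_version="2024-10-01-preview",
)
```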

From 6950477dc83bd44eb0b80a41bb698d237fbf4eca Mon Sep 17 00:00:00 2001
From: Robert Craigie <robert@craigie.dev>
Date: Mon, 13 Jan 2025 20:23:17 +0000
Subject: [PATCH 04/11] Revert "chore(internal): streaming refactors (#2012)"

This reverts commit d76a748f606743407f94dfc26758095560e2082a.
---
 src/openai/_streaming.py | 104 +++++++++++++++++++++++++++------------
 1 file changed, 72 insertions(+), 32 deletions(-)

diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py
index 7aa7b62f6b..0fda992cff 100644
--- a/src/openai/_streaming.py
+++ b/src/openai/_streaming.py
@@ -59,22 +59,42 @@ def __stream__(self) -> Iterator[_T]:
             if sse.data.startswith("[DONE]"):
                 break
 
-            data = sse.json()
-            if is_mapping(data) and data.get("error"):
-                message = None
-                error = data.get("error")
-                if is_mapping(error):
-                    message = error.get("message")
-                if not message or not isinstance(message, str):
-                    message = "An error occurred during streaming"
-
-                raise APIError(
-                    message=message,
-                    request=self.response.request,
-                    body=data["error"],
-                )
-
-            yield process_data(data=data, cast_to=cast_to, response=response)
+            if sse.event is None:
+                data = sse.json()
+                if is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data=data, cast_to=cast_to, response=response)
+
+            else:
+                data = sse.json()
+
+                if sse.event == "error" and is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
 
         # Ensure the entire stream is consumed
         for _sse in iterator:
@@ -141,22 +161,42 @@ async def __stream__(self) -> AsyncIterator[_T]:
             if sse.data.startswith("[DONE]"):
                 break
 
-            data = sse.json()
-            if is_mapping(data) and data.get("error"):
-                message = None
-                error = data.get("error")
-                if is_mapping(error):
-                    message = error.get("message")
-                if not message or not isinstance(message, str):
-                    message = "An error occurred during streaming"
-
-                raise APIError(
-                    message=message,
-                    request=self.response.request,
-                    body=data["error"],
-                )
-
-            yield process_data(data=data, cast_to=cast_to, response=response)
+            if sse.event is None:
+                data = sse.json()
+                if is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data=data, cast_to=cast_to, response=response)
+
+            else:
+                data = sse.json()
+
+                if sse.event == "error" and is_mapping(data) and data.get("error"):
+                    message = None
+                    error = data.get("error")
+                    if is_mapping(error):
+                        message = error.get("message")
+                    if not message or not isinstance(message, str):
+                        message = "An error occurred during streaming"
+
+                    raise APIError(
+                        message=message,
+                        request=self.response.request,
+                        body=data["error"],
+                    )
+
+                yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response)
 
         # Ensure the entire stream is consumed
         async for _sse in iterator:
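
Reverting restores the dispatch on `sse.event`: untyped chunks are parsed and yielded as-is, while typed, non-error events are re-wrapped so the event name reaches `process_data`. A rough sketch of the restored control flow (illustrative only; the real logic lives in `Stream.__stream__` above):

```py
def dispatch(sse) -> object:
    # Untyped SSE chunks (e.g. Chat Completions): yield the payload directly.
    if sse.event is None:
        return sse.json()
    # Typed SSE chunks (e.g. the Realtime API): keep the event name alongside
    # the payload so models that discriminate on "event" can be constructed.
    return {"data": sse.json(), "event": sse.event}
```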

From 514e0e415f87ab4510262d29ed6125384e017b84 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 14 Jan 2025 11:56:21 +0000
Subject: [PATCH 05/11] chore(internal): update deps (#2015)

---
 mypy.ini              | 2 +-
 requirements-dev.lock | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/mypy.ini b/mypy.ini
index 1ea1fe909d..660f1a086e 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -44,7 +44,7 @@ cache_fine_grained = True
 # ```
 # Changing this codegen to make mypy happy would increase complexity
 # and would not be worth it.
-disable_error_code = func-returns-value
+disable_error_code = func-returns-value,overload-cannot-match
 
 # https://github.com/python/mypy/issues/12162
 [mypy.overrides]
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 15ecbf081a..8799e10b06 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -83,7 +83,7 @@ msal==1.31.0
     # via msal-extensions
 msal-extensions==1.2.0
     # via azure-identity
-mypy==1.13.0
+mypy==1.14.1
 mypy-extensions==1.0.0
     # via black
     # via mypy
@@ -124,7 +124,7 @@ pygments==2.18.0
     # via rich
 pyjwt==2.8.0
     # via msal
-pyright==1.1.390
+pyright==1.1.391
 pytest==8.3.3
     # via pytest-asyncio
 pytest-asyncio==0.24.0

From e38927950a5cdad99065853fe7b72aad6bb322e9 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 14 Jan 2025 11:59:51 +0000
Subject: [PATCH 06/11] fix(types): correct type for vector store chunking
 strategy (#2017)

---
 api.md                                           |  2 +-
 src/openai/types/beta/__init__.py                |  3 +++
 .../types/beta/file_chunking_strategy_param.py   |  4 ++--
 ...static_file_chunking_strategy_object_param.py | 16 ++++++++++++++++
 4 files changed, 22 insertions(+), 3 deletions(-)
 create mode 100644 src/openai/types/beta/static_file_chunking_strategy_object_param.py

diff --git a/api.md b/api.md
index ace93e0559..1edd3f6589 100644
--- a/api.md
+++ b/api.md
@@ -314,7 +314,7 @@ from openai.types.beta import (
     OtherFileChunkingStrategyObject,
     StaticFileChunkingStrategy,
     StaticFileChunkingStrategyObject,
-    StaticFileChunkingStrategyParam,
+    StaticFileChunkingStrategyObjectParam,
     VectorStore,
     VectorStoreDeleted,
 )
diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py
index 7f76fed0cd..b9ea792bfa 100644
--- a/src/openai/types/beta/__init__.py
+++ b/src/openai/types/beta/__init__.py
@@ -43,3 +43,6 @@
 from .assistant_response_format_option_param import (
     AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam,
 )
+from .static_file_chunking_strategy_object_param import (
+    StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam,
+)
diff --git a/src/openai/types/beta/file_chunking_strategy_param.py b/src/openai/types/beta/file_chunking_strategy_param.py
index 46383358e5..25d94286d8 100644
--- a/src/openai/types/beta/file_chunking_strategy_param.py
+++ b/src/openai/types/beta/file_chunking_strategy_param.py
@@ -6,8 +6,8 @@
 from typing_extensions import TypeAlias
 
 from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam
-from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
+from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam
 
 __all__ = ["FileChunkingStrategyParam"]
 
-FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam]
+FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam]
diff --git a/src/openai/types/beta/static_file_chunking_strategy_object_param.py b/src/openai/types/beta/static_file_chunking_strategy_object_param.py
new file mode 100644
index 0000000000..0cdf35c0df
--- /dev/null
+++ b/src/openai/types/beta/static_file_chunking_strategy_object_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam
+
+__all__ = ["StaticFileChunkingStrategyObjectParam"]
+
+
+class StaticFileChunkingStrategyObjectParam(TypedDict, total=False):
+    static: Required[StaticFileChunkingStrategyParam]
+
+    type: Required[Literal["static"]]
+    """Always `static`."""

From f26746cbcd893d66cf8a3fd68a7c3779dc8c833c Mon Sep 17 00:00:00 2001
From: Robert Craigie <robert@craigie.dev>
Date: Wed, 15 Jan 2025 14:06:06 +0000
Subject: [PATCH 07/11] chore(examples): update realtime model

closes #2020
---
 README.md                             | 4 ++--
 examples/realtime/push_to_talk_app.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index ad1c9afd10..ec556bd27a 100644
--- a/README.md
+++ b/README.md
@@ -275,7 +275,7 @@ from openai import AsyncOpenAI
 async def main():
     client = AsyncOpenAI()
 
-    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
+    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
         await connection.session.update(session={'modalities': ['text']})
 
         await connection.conversation.item.create(
@@ -309,7 +309,7 @@ Whenever an error occurs, the Realtime API will send an [`error` event](https://
 ```py
 client = AsyncOpenAI()
 
-async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
+async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
     ...
     async for event in connection:
         if event.type == 'error':
diff --git a/examples/realtime/push_to_talk_app.py b/examples/realtime/push_to_talk_app.py
index d46945a8ed..8dc303a83a 100755
--- a/examples/realtime/push_to_talk_app.py
+++ b/examples/realtime/push_to_talk_app.py
@@ -152,7 +152,7 @@ async def on_mount(self) -> None:
         self.run_worker(self.send_mic_audio())
 
     async def handle_realtime_connection(self) -> None:
-        async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as conn:
+        async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview") as conn:
             self.connection = conn
             self.connected.set()
 

From 0a9a0f5d8b9d5457643798287f893305006dd518 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 15 Jan 2025 17:51:10 +0000
Subject: [PATCH 08/11] chore(internal): bump pyright dependency (#2021)

---
 requirements-dev.lock          |  2 +-
 src/openai/_legacy_response.py | 12 ++++++++++--
 src/openai/_response.py        |  8 +++++++-
 3 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/requirements-dev.lock b/requirements-dev.lock
index 8799e10b06..ef26591f12 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -124,7 +124,7 @@ pygments==2.18.0
     # via rich
 pyjwt==2.8.0
     # via msal
-pyright==1.1.391
+pyright==1.1.392.post0
 pytest==8.3.3
     # via pytest-asyncio
 pytest-asyncio==0.24.0
diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py
index 7a14f27adb..25680049dc 100644
--- a/src/openai/_legacy_response.py
+++ b/src/openai/_legacy_response.py
@@ -269,7 +269,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
         if origin == LegacyAPIResponse:
             raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
 
-        if inspect.isclass(origin) and issubclass(origin, httpx.Response):
+        if inspect.isclass(
+            origin  # pyright: ignore[reportUnknownArgumentType]
+        ) and issubclass(origin, httpx.Response):
             # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
             # and pass that class to our request functions. We cannot change the variance to be either
             # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
@@ -279,7 +281,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
                 raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
             return cast(R, response)
 
-        if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
             raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
 
         if (
diff --git a/src/openai/_response.py b/src/openai/_response.py
index 1527446585..36c7ea1281 100644
--- a/src/openai/_response.py
+++ b/src/openai/_response.py
@@ -214,7 +214,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
                 raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
             return cast(R, response)
 
-        if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
             raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
 
         if (

From 6d3513c86f6e5800f8f73a45e089b7a205327121 Mon Sep 17 00:00:00 2001
From: Rohit Joshi <891456+rjoshi@users.noreply.github.com>
Date: Thu, 16 Jan 2025 04:46:22 -0800
Subject: [PATCH 09/11] fix(structured outputs): avoid parsing empty
 content (#2023)

Fixing https://github.com/openai/openai-python/issues/1763 where parsing often fails when content is an empty string instead of None.
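
The guard change means an empty string is now treated like a missing message rather than handed to `_parse_content` (where JSON parsing of `""` fails). A minimal illustration of the two guards (toy values only):

```py
for content in (None, "", '{"answer": 42}'):
    old_guard = content is not None  # lets "" through to _parse_content
    new_guard = bool(content)        # skips both None and ""
    print(repr(content), "old:", old_guard, "new:", new_guard)
```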
---
 src/openai/lib/_parsing/_completions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py
index f1fa9f2b55..33c4ccb946 100644
--- a/src/openai/lib/_parsing/_completions.py
+++ b/src/openai/lib/_parsing/_completions.py
@@ -157,7 +157,7 @@ def maybe_parse_content(
     response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven,
     message: ChatCompletionMessage | ParsedChatCompletionMessage[object],
 ) -> ResponseFormatT | None:
-    if has_rich_response_format(response_format) and message.content is not None and not message.refusal:
+    if has_rich_response_format(response_format) and message.content and not message.refusal:
         return _parse_content(response_format, message.content)
 
     return None

From 2f4f0b374207f162060c328b71ec995049dc42e8 Mon Sep 17 00:00:00 2001
From: kanchi <17161397+KanchiShimono@users.noreply.github.com>
Date: Fri, 17 Jan 2025 20:40:26 +0900
Subject: [PATCH 10/11] fix(structured outputs): correct schema coercion for
 inline ref expansion (#2025)

---
 src/openai/lib/_pydantic.py |   3 +
 tests/lib/test_pydantic.py  | 174 ++++++++++++++++++++++++++++++++++++
 2 files changed, 177 insertions(+)

diff --git a/src/openai/lib/_pydantic.py b/src/openai/lib/_pydantic.py
index 4e8bc772be..c2d73e5fc6 100644
--- a/src/openai/lib/_pydantic.py
+++ b/src/openai/lib/_pydantic.py
@@ -108,6 +108,9 @@ def _ensure_strict_json_schema(
         # properties from the json schema take priority over the ones on the `$ref`
         json_schema.update({**resolved, **json_schema})
         json_schema.pop("$ref")
+        # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied,
+        # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid.
+        return _ensure_strict_json_schema(json_schema, path=path, root=root)
 
     return json_schema
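
The inline expansion merges the resolved `$ref` with its sibling keys, so the merged object needs one more strictness pass to pick up `additionalProperties: false`. A simplified toy of the idea (not the SDK's `_ensure_strict_json_schema`; real `$ref` resolution walks the full JSON pointer):

```py
def ensure_strict(schema: dict, defs: dict) -> dict:
    ref = schema.pop("$ref", None)
    if ref is not None:
        # Inline the target, letting the schema's own sibling keys
        # (e.g. "description") take priority, then recurse so the
        # merged object is made strict like any other.
        resolved = defs[ref.split("/")[-1]]
        return ensure_strict({**resolved, **schema}, defs)
    if schema.get("type") == "object":
        schema.setdefault("additionalProperties", False)
        for prop in schema.get("properties", {}).values():
            ensure_strict(prop, defs)
    return schema


defs = {"Star": {"type": "object", "properties": {"name": {"type": "string"}}}}
print(ensure_strict({"$ref": "#/$defs/Star", "description": "The largest star."}, defs))
# -> the inlined schema keeps "description" and gains additionalProperties: False
```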
 
diff --git a/tests/lib/test_pydantic.py b/tests/lib/test_pydantic.py
index 99b9e96d21..7e128b70c0 100644
--- a/tests/lib/test_pydantic.py
+++ b/tests/lib/test_pydantic.py
@@ -7,6 +7,7 @@
 
 import openai
 from openai._compat import PYDANTIC_V2
+from openai.lib._pydantic import to_strict_json_schema
 
 from .schema_types.query import Query
 
@@ -235,3 +236,176 @@ def test_enums() -> None:
                 },
             }
         )
+
+
+class Star(BaseModel):
+    name: str = Field(description="The name of the star.")
+
+
+class Galaxy(BaseModel):
+    name: str = Field(description="The name of the galaxy.")
+    largest_star: Star = Field(description="The largest star in the galaxy.")
+
+
+class Universe(BaseModel):
+    name: str = Field(description="The name of the universe.")
+    galaxy: Galaxy = Field(description="A galaxy in the universe.")
+
+
+def test_nested_inline_ref_expansion() -> None:
+    if PYDANTIC_V2:
+        assert to_strict_json_schema(Universe) == snapshot(
+            {
+                "title": "Universe",
+                "type": "object",
+                "$defs": {
+                    "Star": {
+                        "title": "Star",
+                        "type": "object",
+                        "properties": {
+                            "name": {
+                                "type": "string",
+                                "title": "Name",
+                                "description": "The name of the star.",
+                            }
+                        },
+                        "required": ["name"],
+                        "additionalProperties": False,
+                    },
+                    "Galaxy": {
+                        "title": "Galaxy",
+                        "type": "object",
+                        "properties": {
+                            "name": {
+                                "type": "string",
+                                "title": "Name",
+                                "description": "The name of the galaxy.",
+                            },
+                            "largest_star": {
+                                "title": "Star",
+                                "type": "object",
+                                "properties": {
+                                    "name": {
+                                        "type": "string",
+                                        "title": "Name",
+                                        "description": "The name of the star.",
+                                    }
+                                },
+                                "required": ["name"],
+                                "description": "The largest star in the galaxy.",
+                                "additionalProperties": False,
+                            },
+                        },
+                        "required": ["name", "largest_star"],
+                        "additionalProperties": False,
+                    },
+                },
+                "properties": {
+                    "name": {
+                        "type": "string",
+                        "title": "Name",
+                        "description": "The name of the universe.",
+                    },
+                    "galaxy": {
+                        "title": "Galaxy",
+                        "type": "object",
+                        "properties": {
+                            "name": {
+                                "type": "string",
+                                "title": "Name",
+                                "description": "The name of the galaxy.",
+                            },
+                            "largest_star": {
+                                "title": "Star",
+                                "type": "object",
+                                "properties": {
+                                    "name": {
+                                        "type": "string",
+                                        "title": "Name",
+                                        "description": "The name of the star.",
+                                    }
+                                },
+                                "required": ["name"],
+                                "description": "The largest star in the galaxy.",
+                                "additionalProperties": False,
+                            },
+                        },
+                        "required": ["name", "largest_star"],
+                        "description": "A galaxy in the universe.",
+                        "additionalProperties": False,
+                    },
+                },
+                "required": ["name", "galaxy"],
+                "additionalProperties": False,
+            }
+        )
+    else:
+        assert to_strict_json_schema(Universe) == snapshot(
+            {
+                "title": "Universe",
+                "type": "object",
+                "definitions": {
+                    "Star": {
+                        "title": "Star",
+                        "type": "object",
+                        "properties": {
+                            "name": {"title": "Name", "description": "The name of the star.", "type": "string"}
+                        },
+                        "required": ["name"],
+                        "additionalProperties": False,
+                    },
+                    "Galaxy": {
+                        "title": "Galaxy",
+                        "type": "object",
+                        "properties": {
+                            "name": {"title": "Name", "description": "The name of the galaxy.", "type": "string"},
+                            "largest_star": {
+                                "title": "Largest Star",
+                                "description": "The largest star in the galaxy.",
+                                "type": "object",
+                                "properties": {
+                                    "name": {"title": "Name", "description": "The name of the star.", "type": "string"}
+                                },
+                                "required": ["name"],
+                                "additionalProperties": False,
+                            },
+                        },
+                        "required": ["name", "largest_star"],
+                        "additionalProperties": False,
+                    },
+                },
+                "properties": {
+                    "name": {
+                        "title": "Name",
+                        "description": "The name of the universe.",
+                        "type": "string",
+                    },
+                    "galaxy": {
+                        "title": "Galaxy",
+                        "description": "A galaxy in the universe.",
+                        "type": "object",
+                        "properties": {
+                            "name": {
+                                "title": "Name",
+                                "description": "The name of the galaxy.",
+                                "type": "string",
+                            },
+                            "largest_star": {
+                                "title": "Largest Star",
+                                "description": "The largest star in the galaxy.",
+                                "type": "object",
+                                "properties": {
+                                    "name": {"title": "Name", "description": "The name of the star.", "type": "string"}
+                                },
+                                "required": ["name"],
+                                "additionalProperties": False,
+                            },
+                        },
+                        "required": ["name", "largest_star"],
+                        "additionalProperties": False,
+                    },
+                },
+                "required": ["name", "galaxy"],
+                "additionalProperties": False,
+            }
+        )

From 5ec8a33908b4b734ecc754ff19d1f540ec8a409a Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 17 Jan 2025 11:40:58 +0000
Subject: [PATCH 11/11] release: 1.59.8

---
 .release-please-manifest.json |  2 +-
 CHANGELOG.md                  | 24 ++++++++++++++++++++++++
 pyproject.toml                |  2 +-
 src/openai/_version.py        |  2 +-
 4 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 7da3bd4caf..58f8a4601d 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.59.7"
+  ".": "1.59.8"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 08674b4a36..9f301cedff 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
 # Changelog
 
+## 1.59.8 (2025-01-17)
+
+Full Changelog: [v1.59.7...v1.59.8](https://github.com/openai/openai-python/compare/v1.59.7...v1.59.8)
+
+### Bug Fixes
+
+* streaming ([c16f58e](https://github.com/openai/openai-python/commit/c16f58ead0bc85055b164182689ba74b7e939dfa))
+* **structured outputs:** avoid parsing empty content ([#2023](https://github.com/openai/openai-python/issues/2023)) ([6d3513c](https://github.com/openai/openai-python/commit/6d3513c86f6e5800f8f73a45e089b7a205327121))
+* **structured outputs:** correct schema coercion for inline ref expansion ([#2025](https://github.com/openai/openai-python/issues/2025)) ([2f4f0b3](https://github.com/openai/openai-python/commit/2f4f0b374207f162060c328b71ec995049dc42e8))
+* **types:** correct type for vector store chunking strategy ([#2017](https://github.com/openai/openai-python/issues/2017)) ([e389279](https://github.com/openai/openai-python/commit/e38927950a5cdad99065853fe7b72aad6bb322e9))
+
+
+### Chores
+
+* **examples:** update realtime model ([f26746c](https://github.com/openai/openai-python/commit/f26746cbcd893d66cf8a3fd68a7c3779dc8c833c)), closes [#2020](https://github.com/openai/openai-python/issues/2020)
+* **internal:** bump pyright dependency ([#2021](https://github.com/openai/openai-python/issues/2021)) ([0a9a0f5](https://github.com/openai/openai-python/commit/0a9a0f5d8b9d5457643798287f893305006dd518))
+* **internal:** streaming refactors ([#2012](https://github.com/openai/openai-python/issues/2012)) ([d76a748](https://github.com/openai/openai-python/commit/d76a748f606743407f94dfc26758095560e2082a))
+* **internal:** update deps ([#2015](https://github.com/openai/openai-python/issues/2015)) ([514e0e4](https://github.com/openai/openai-python/commit/514e0e415f87ab4510262d29ed6125384e017b84))
+
+
+### Documentation
+
+* **examples/azure:** example script with realtime API ([#1967](https://github.com/openai/openai-python/issues/1967)) ([84f2f9c](https://github.com/openai/openai-python/commit/84f2f9c0439229a7db7136fe78419292d34d1f81))
+
 ## 1.59.7 (2025-01-13)
 
 Full Changelog: [v1.59.6...v1.59.7](https://github.com/openai/openai-python/compare/v1.59.6...v1.59.7)
diff --git a/pyproject.toml b/pyproject.toml
index e769f4a95f..a75d24e1eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.59.7"
+version = "1.59.8"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 656d17ff63..d6f55997e7 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.59.7"  # x-release-please-version
+__version__ = "1.59.8"  # x-release-please-version