From babe65f92c8a71efdc8adb7e68205d5906f571cf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:20:02 +0000 Subject: [PATCH 01/24] chore(internal): streaming refactors (#2012) --- src/openai/_streaming.py | 66 +++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 34 deletions(-) diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index 0fda992cff..b275986a46 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -59,23 +59,22 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None: - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) else: data = sse.json() @@ -161,23 +160,22 @@ async def __stream__(self) -> AsyncIterator[_T]: if sse.data.startswith("[DONE]"): break - if sse.event is None: - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) else: data = sse.json() From 7e88d42289f1327540c3d3f9210fae3c61474053 Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 13 Jan 2025 16:59:43 +0000 Subject: [PATCH 02/24] fix: streaming --- src/openai/_streaming.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index b275986a46..7aa7b62f6b 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -76,25 +76,6 @@ def __stream__(self) -> Iterator[_T]: yield process_data(data=data, cast_to=cast_to, response=response) - else: - data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - 
yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) - # Ensure the entire stream is consumed for _sse in iterator: ... @@ -177,25 +158,6 @@ async def __stream__(self) -> AsyncIterator[_T]: yield process_data(data=data, cast_to=cast_to, response=response) - else: - data = sse.json() - - if sse.event == "error" and is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) - # Ensure the entire stream is consumed async for _sse in iterator: ... From 82ccc9858ac0de22ac874dcf796c17a333ce7b00 Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Mon, 13 Jan 2025 10:08:10 -0800 Subject: [PATCH 03/24] docs(examples/azure): example script with realtime API (#1967) --- examples/realtime/azure_realtime.py | 57 +++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 examples/realtime/azure_realtime.py diff --git a/examples/realtime/azure_realtime.py b/examples/realtime/azure_realtime.py new file mode 100644 index 0000000000..de88d47052 --- /dev/null +++ b/examples/realtime/azure_realtime.py @@ -0,0 +1,57 @@ +import os +import asyncio + +from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider + +from openai import AsyncAzureOpenAI + +# Azure OpenAI Realtime Docs + +# How-to: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio +# Supported models and API versions: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio#supported-models +# Entra ID auth: https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity + + +async def main() -> None: + """The following example demonstrates how to configure Azure OpenAI to use the Realtime API. + For an audio example, see push_to_talk_app.py and update the client and model parameter accordingly. + + When prompted for user input, type a message and hit enter to send it to the model. + Enter "q" to quit the conversation. 
+ """ + + credential = DefaultAzureCredential() + client = AsyncAzureOpenAI( + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + azure_ad_token_provider=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default"), + api_version="2024-10-01-preview", + ) + async with client.beta.realtime.connect( + model="gpt-4o-realtime-preview", # deployment name for your model + ) as connection: + await connection.session.update(session={"modalities": ["text"]}) # type: ignore + while True: + user_input = input("Enter a message: ") + if user_input == "q": + break + + await connection.conversation.item.create( + item={ + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": user_input}], + } + ) + await connection.response.create() + async for event in connection: + if event.type == "response.text.delta": + print(event.delta, flush=True, end="") + elif event.type == "response.text.done": + print() + elif event.type == "response.done": + break + + await credential.close() + + +asyncio.run(main()) From e081d99e16835a038ef0dbb68500e562e021291f Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Mon, 13 Jan 2025 20:23:17 +0000 Subject: [PATCH 04/24] Revert "chore(internal): streaming refactors (#2012)" This reverts commit d76a748f606743407f94dfc26758095560e2082a. --- src/openai/_streaming.py | 104 +++++++++++++++++++++++++++------------ 1 file changed, 72 insertions(+), 32 deletions(-) diff --git a/src/openai/_streaming.py b/src/openai/_streaming.py index 7aa7b62f6b..0fda992cff 100644 --- a/src/openai/_streaming.py +++ b/src/openai/_streaming.py @@ -59,22 +59,42 @@ def __stream__(self) -> Iterator[_T]: if sse.data.startswith("[DONE]"): break - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) + if sse.event is None: + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) + + else: + data = sse.json() + + if sse.event == "error" and is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) # Ensure the entire stream is consumed for _sse in iterator: @@ -141,22 +161,42 @@ async def __stream__(self) -> AsyncIterator[_T]: if sse.data.startswith("[DONE]"): break - data = sse.json() - if is_mapping(data) and data.get("error"): - message = None - error = data.get("error") - if is_mapping(error): - message = error.get("message") - if not message or not isinstance(message, str): - message = "An error occurred during streaming" - - raise 
APIError( - message=message, - request=self.response.request, - body=data["error"], - ) - - yield process_data(data=data, cast_to=cast_to, response=response) + if sse.event is None: + data = sse.json() + if is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data=data, cast_to=cast_to, response=response) + + else: + data = sse.json() + + if sse.event == "error" and is_mapping(data) and data.get("error"): + message = None + error = data.get("error") + if is_mapping(error): + message = error.get("message") + if not message or not isinstance(message, str): + message = "An error occurred during streaming" + + raise APIError( + message=message, + request=self.response.request, + body=data["error"], + ) + + yield process_data(data={"data": data, "event": sse.event}, cast_to=cast_to, response=response) # Ensure the entire stream is consumed async for _sse in iterator: From 83f11490fa291f9814f3dae6a65b1f62d0177675 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:56:21 +0000 Subject: [PATCH 05/24] chore(internal): update deps (#2015) --- mypy.ini | 2 +- requirements-dev.lock | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mypy.ini b/mypy.ini index 1ea1fe909d..660f1a086e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -44,7 +44,7 @@ cache_fine_grained = True # ``` # Changing this codegen to make mypy happy would increase complexity # and would not be worth it. 
-disable_error_code = func-returns-value +disable_error_code = func-returns-value,overload-cannot-match # https://github.com/python/mypy/issues/12162 [mypy.overrides] diff --git a/requirements-dev.lock b/requirements-dev.lock index 15ecbf081a..8799e10b06 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -83,7 +83,7 @@ msal==1.31.0 # via msal-extensions msal-extensions==1.2.0 # via azure-identity -mypy==1.13.0 +mypy==1.14.1 mypy-extensions==1.0.0 # via black # via mypy @@ -124,7 +124,7 @@ pygments==2.18.0 # via rich pyjwt==2.8.0 # via msal -pyright==1.1.390 +pyright==1.1.391 pytest==8.3.3 # via pytest-asyncio pytest-asyncio==0.24.0 From d256d83589261e3bb2d2778a4f5bd4dda3248ac9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 11:59:51 +0000 Subject: [PATCH 06/24] fix(types): correct type for vector store chunking strategy (#2017) --- api.md | 2 +- src/openai/types/beta/__init__.py | 3 +++ .../types/beta/file_chunking_strategy_param.py | 4 ++-- ...static_file_chunking_strategy_object_param.py | 16 ++++++++++++++++ 4 files changed, 22 insertions(+), 3 deletions(-) create mode 100644 src/openai/types/beta/static_file_chunking_strategy_object_param.py diff --git a/api.md b/api.md index ace93e0559..1edd3f6589 100644 --- a/api.md +++ b/api.md @@ -314,7 +314,7 @@ from openai.types.beta import ( OtherFileChunkingStrategyObject, StaticFileChunkingStrategy, StaticFileChunkingStrategyObject, - StaticFileChunkingStrategyParam, + StaticFileChunkingStrategyObjectParam, VectorStore, VectorStoreDeleted, ) diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index 7f76fed0cd..b9ea792bfa 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -43,3 +43,6 @@ from .assistant_response_format_option_param import ( AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, ) +from .static_file_chunking_strategy_object_param import ( + StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, +) diff --git a/src/openai/types/beta/file_chunking_strategy_param.py b/src/openai/types/beta/file_chunking_strategy_param.py index 46383358e5..25d94286d8 100644 --- a/src/openai/types/beta/file_chunking_strategy_param.py +++ b/src/openai/types/beta/file_chunking_strategy_param.py @@ -6,8 +6,8 @@ from typing_extensions import TypeAlias from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam -from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam +from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam __all__ = ["FileChunkingStrategyParam"] -FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam] +FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam] diff --git a/src/openai/types/beta/static_file_chunking_strategy_object_param.py b/src/openai/types/beta/static_file_chunking_strategy_object_param.py new file mode 100644 index 0000000000..0cdf35c0df --- /dev/null +++ b/src/openai/types/beta/static_file_chunking_strategy_object_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam + +__all__ = ["StaticFileChunkingStrategyObjectParam"] + + +class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): + static: Required[StaticFileChunkingStrategyParam] + + type: Required[Literal["static"]] + """Always `static`.""" From fd76342499f061bb157e4d5501a1e55f867cbc2c Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 15 Jan 2025 14:06:06 +0000 Subject: [PATCH 07/24] chore(examples): update realtime model closes #2020 --- README.md | 4 ++-- examples/realtime/push_to_talk_app.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ad1c9afd10..ec556bd27a 100644 --- a/README.md +++ b/README.md @@ -275,7 +275,7 @@ from openai import AsyncOpenAI async def main(): client = AsyncOpenAI() - async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection: + async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection: await connection.session.update(session={'modalities': ['text']}) await connection.conversation.item.create( @@ -309,7 +309,7 @@ Whenever an error occurs, the Realtime API will send an [`error` event](https:// ```py client = AsyncOpenAI() -async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection: +async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection: ... async for event in connection: if event.type == 'error': diff --git a/examples/realtime/push_to_talk_app.py b/examples/realtime/push_to_talk_app.py index d46945a8ed..8dc303a83a 100755 --- a/examples/realtime/push_to_talk_app.py +++ b/examples/realtime/push_to_talk_app.py @@ -152,7 +152,7 @@ async def on_mount(self) -> None: self.run_worker(self.send_mic_audio()) async def handle_realtime_connection(self) -> None: - async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as conn: + async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview") as conn: self.connection = conn self.connected.set() From bf5bebb57d74266d3010a72267298c0c832510a9 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 17:51:10 +0000 Subject: [PATCH 08/24] chore(internal): bump pyright dependency (#2021) --- requirements-dev.lock | 2 +- src/openai/_legacy_response.py | 12 ++++++++++-- src/openai/_response.py | 8 +++++++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/requirements-dev.lock b/requirements-dev.lock index 8799e10b06..ef26591f12 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -124,7 +124,7 @@ pygments==2.18.0 # via rich pyjwt==2.8.0 # via msal -pyright==1.1.391 +pyright==1.1.392.post0 pytest==8.3.3 # via pytest-asyncio pytest-asyncio==0.24.0 diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index 7a14f27adb..25680049dc 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -269,7 +269,9 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if origin == LegacyAPIResponse: raise RuntimeError("Unexpected state - cast_to is `APIResponse`") - if inspect.isclass(origin) and issubclass(origin, httpx.Response): + if inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) and issubclass(origin, httpx.Response): # Because of the invariance 
of our ResponseT TypeVar, users can subclass httpx.Response # and pass that class to our request functions. We cannot change the variance to be either # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct @@ -279,7 +281,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") return cast(R, response) - if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): + if ( + inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) + and not issubclass(origin, BaseModel) + and issubclass(origin, pydantic.BaseModel) + ): raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") if ( diff --git a/src/openai/_response.py b/src/openai/_response.py index 1527446585..36c7ea1281 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -214,7 +214,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") return cast(R, response) - if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): + if ( + inspect.isclass( + origin # pyright: ignore[reportUnknownArgumentType] + ) + and not issubclass(origin, BaseModel) + and issubclass(origin, pydantic.BaseModel) + ): raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") if ( From 08ab62b93a0c7690f00e760172cfefcb81b8e150 Mon Sep 17 00:00:00 2001 From: Rohit Joshi <891456+rjoshi@users.noreply.github.com> Date: Thu, 16 Jan 2025 04:46:22 -0800 Subject: [PATCH 09/24] fix(structured outputs): avoid parsing empty empty content (#2023) Fixing https://github.com/openai/openai-python/issues/1763 where parsing often fails when content is empty string instead of None. 
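
A minimal sketch of the failure mode, with hypothetical values (the SDK's
actual guard lives in `maybe_parse_content`, shown in the diff below, and
uses its own parser rather than `json.loads`):

    import json

    content = ""  # hypothetical: the model returned an empty string, not None
    try:
        if content is not None:  # old guard: "" slips through
            json.loads(content)  # parsing "" raises json.JSONDecodeError
    except json.JSONDecodeError:
        pass
    if content:  # new guard: both None and "" are skipped
        json.loads(content)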
--- src/openai/lib/_parsing/_completions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py index f1fa9f2b55..33c4ccb946 100644 --- a/src/openai/lib/_parsing/_completions.py +++ b/src/openai/lib/_parsing/_completions.py @@ -157,7 +157,7 @@ def maybe_parse_content( response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, message: ChatCompletionMessage | ParsedChatCompletionMessage[object], ) -> ResponseFormatT | None: - if has_rich_response_format(response_format) and message.content is not None and not message.refusal: + if has_rich_response_format(response_format) and message.content and not message.refusal: return _parse_content(response_format, message.content) return None From 1bdabb489a86bd721a3e237d8556583ed0d8dfa1 Mon Sep 17 00:00:00 2001 From: kanchi <17161397+KanchiShimono@users.noreply.github.com> Date: Fri, 17 Jan 2025 20:40:26 +0900 Subject: [PATCH 10/24] fix(structured outputs): correct schema coercion for inline ref expansion (#2025) --- src/openai/lib/_pydantic.py | 3 + tests/lib/test_pydantic.py | 174 ++++++++++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+) diff --git a/src/openai/lib/_pydantic.py b/src/openai/lib/_pydantic.py index 4e8bc772be..c2d73e5fc6 100644 --- a/src/openai/lib/_pydantic.py +++ b/src/openai/lib/_pydantic.py @@ -108,6 +108,9 @@ def _ensure_strict_json_schema( # properties from the json schema take priority over the ones on the `$ref` json_schema.update({**resolved, **json_schema}) json_schema.pop("$ref") + # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied, + # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid. 
+ return _ensure_strict_json_schema(json_schema, path=path, root=root) return json_schema diff --git a/tests/lib/test_pydantic.py b/tests/lib/test_pydantic.py index 99b9e96d21..7e128b70c0 100644 --- a/tests/lib/test_pydantic.py +++ b/tests/lib/test_pydantic.py @@ -7,6 +7,7 @@ import openai from openai._compat import PYDANTIC_V2 +from openai.lib._pydantic import to_strict_json_schema from .schema_types.query import Query @@ -235,3 +236,176 @@ def test_enums() -> None: }, } ) + + +class Star(BaseModel): + name: str = Field(description="The name of the star.") + + +class Galaxy(BaseModel): + name: str = Field(description="The name of the galaxy.") + largest_star: Star = Field(description="The largest star in the galaxy.") + + +class Universe(BaseModel): + name: str = Field(description="The name of the universe.") + galaxy: Galaxy = Field(description="A galaxy in the universe.") + + +def test_nested_inline_ref_expansion() -> None: + if PYDANTIC_V2: + assert to_strict_json_schema(Universe) == snapshot( + { + "title": "Universe", + "type": "object", + "$defs": { + "Star": { + "title": "Star", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the star.", + } + }, + "required": ["name"], + "additionalProperties": False, + }, + "Galaxy": { + "title": "Galaxy", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the galaxy.", + }, + "largest_star": { + "title": "Star", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the star.", + } + }, + "required": ["name"], + "description": "The largest star in the galaxy.", + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "additionalProperties": False, + }, + }, + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the universe.", + }, + "galaxy": { + "title": "Galaxy", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the galaxy.", + }, + "largest_star": { + "title": "Star", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the star.", + } + }, + "required": ["name"], + "description": "The largest star in the galaxy.", + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "description": "A galaxy in the universe.", + "additionalProperties": False, + }, + }, + "required": ["name", "galaxy"], + "additionalProperties": False, + } + ) + else: + assert to_strict_json_schema(Universe) == snapshot( + { + "title": "Universe", + "type": "object", + "definitions": { + "Star": { + "title": "Star", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the star.", "type": "string"} + }, + "required": ["name"], + "additionalProperties": False, + }, + "Galaxy": { + "title": "Galaxy", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the galaxy.", "type": "string"}, + "largest_star": { + "title": "Largest Star", + "description": "The largest star in the galaxy.", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the star.", "type": "string"} + }, + "required": ["name"], + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "additionalProperties": False, + }, + }, + 
"properties": { + "name": { + "title": "Name", + "description": "The name of the universe.", + "type": "string", + }, + "galaxy": { + "title": "Galaxy", + "description": "A galaxy in the universe.", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "The name of the galaxy.", + "type": "string", + }, + "largest_star": { + "title": "Largest Star", + "description": "The largest star in the galaxy.", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the star.", "type": "string"} + }, + "required": ["name"], + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "additionalProperties": False, + }, + }, + "required": ["name", "galaxy"], + "additionalProperties": False, + } + ) From 4dd5cf25fa58076307a5baed0a87f6d765f20100 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 17 Jan 2025 11:40:58 +0000 Subject: [PATCH 11/24] release: 1.59.8 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 24 ++++++++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7da3bd4caf..58f8a4601d 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.59.7" + ".": "1.59.8" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 08674b4a36..9f301cedff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 1.59.8 (2025-01-17) + +Full Changelog: [v1.59.7...v1.59.8](https://github.com/openai/openai-python/compare/v1.59.7...v1.59.8) + +### Bug Fixes + +* streaming ([c16f58e](https://github.com/openai/openai-python/commit/c16f58ead0bc85055b164182689ba74b7e939dfa)) +* **structured outputs:** avoid parsing empty empty content ([#2023](https://github.com/openai/openai-python/issues/2023)) ([6d3513c](https://github.com/openai/openai-python/commit/6d3513c86f6e5800f8f73a45e089b7a205327121)) +* **structured outputs:** correct schema coercion for inline ref expansion ([#2025](https://github.com/openai/openai-python/issues/2025)) ([2f4f0b3](https://github.com/openai/openai-python/commit/2f4f0b374207f162060c328b71ec995049dc42e8)) +* **types:** correct type for vector store chunking strategy ([#2017](https://github.com/openai/openai-python/issues/2017)) ([e389279](https://github.com/openai/openai-python/commit/e38927950a5cdad99065853fe7b72aad6bb322e9)) + + +### Chores + +* **examples:** update realtime model ([f26746c](https://github.com/openai/openai-python/commit/f26746cbcd893d66cf8a3fd68a7c3779dc8c833c)), closes [#2020](https://github.com/openai/openai-python/issues/2020) +* **internal:** bump pyright dependency ([#2021](https://github.com/openai/openai-python/issues/2021)) ([0a9a0f5](https://github.com/openai/openai-python/commit/0a9a0f5d8b9d5457643798287f893305006dd518)) +* **internal:** streaming refactors ([#2012](https://github.com/openai/openai-python/issues/2012)) ([d76a748](https://github.com/openai/openai-python/commit/d76a748f606743407f94dfc26758095560e2082a)) +* **internal:** update deps ([#2015](https://github.com/openai/openai-python/issues/2015)) ([514e0e4](https://github.com/openai/openai-python/commit/514e0e415f87ab4510262d29ed6125384e017b84)) + + +### Documentation + +* **examples/azure:** example script with realtime API ([#1967](https://github.com/openai/openai-python/issues/1967)) 
([84f2f9c](https://github.com/openai/openai-python/commit/84f2f9c0439229a7db7136fe78419292d34d1f81))
+
 ## 1.59.7 (2025-01-13)
 
 Full Changelog: [v1.59.6...v1.59.7](https://github.com/openai/openai-python/compare/v1.59.6...v1.59.7)
diff --git a/pyproject.toml b/pyproject.toml
index e769f4a95f..a75d24e1eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.59.7"
+version = "1.59.8"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 656d17ff63..d6f55997e7 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.59.7"  # x-release-please-version
+__version__ = "1.59.8"  # x-release-please-version

From 1e5e19976a02f4f3423cf7e32ad5fa020c857b82 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 09:38:30 +0000
Subject: [PATCH 12/24] chore(internal): update websockets dep (#2036)

---
 requirements-dev.lock | 2 +-
 requirements.lock     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements-dev.lock b/requirements-dev.lock
index ef26591f12..38cc6e1cf2 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -184,7 +184,7 @@ urllib3==2.2.1
   # via requests
 virtualenv==20.24.5
   # via nox
-websockets==14.1
+websockets==14.2
   # via openai
 zipp==3.17.0
   # via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index a3e3602abe..cbdff94fa3 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -63,5 +63,5 @@ typing-extensions==4.12.2
   # via pydantic-core
 tzdata==2024.1
   # via pandas
-websockets==14.1
+websockets==14.2
   # via openai

From fbc88b6206338761019cb6a4258d7de46f578fcb Mon Sep 17 00:00:00 2001
From: Nino Risteski <95188570+NinoRisteski@users.noreply.github.com>
Date: Mon, 20 Jan 2025 10:53:29 +0100
Subject: [PATCH 13/24] docs: fix typo (#2031)

removed duplicate 'the' twice
---
 src/openai/resources/chat/chat.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/openai/resources/chat/chat.py b/src/openai/resources/chat/chat.py
index dc23a15a8e..9c4aacc953 100644
--- a/src/openai/resources/chat/chat.py
+++ b/src/openai/resources/chat/chat.py
@@ -24,7 +24,7 @@ def completions(self) -> Completions:
     @cached_property
     def with_raw_response(self) -> ChatWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -49,7 +49,7 @@ def completions(self) -> AsyncCompletions:
     @cached_property
     def with_raw_response(self) -> AsyncChatWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers

From de0550420763b918ffe49d2fffd7b76b2dd00ba8 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Mon, 20 Jan 2025 10:13:38 +0000
Subject: [PATCH 14/24] docs(raw responses): fix duplicate `the` (#2039)

---
 src/openai/resources/audio/audio.py                      | 4 ++--
 src/openai/resources/audio/speech.py                     | 4 ++--
 src/openai/resources/audio/transcriptions.py             | 4 ++--
 src/openai/resources/audio/translations.py               | 4 ++--
 src/openai/resources/batches.py                          | 4 ++--
 src/openai/resources/beta/assistants.py                  | 4 ++--
 src/openai/resources/beta/beta.py                        | 4 ++--
 src/openai/resources/beta/realtime/realtime.py           | 4 ++--
 src/openai/resources/beta/realtime/sessions.py           | 4 ++--
 src/openai/resources/beta/threads/messages.py            | 4 ++--
 src/openai/resources/beta/threads/runs/runs.py           | 4 ++--
 src/openai/resources/beta/threads/runs/steps.py          | 4 ++--
 src/openai/resources/beta/threads/threads.py             | 4 ++--
 src/openai/resources/beta/vector_stores/file_batches.py  | 4 ++--
 src/openai/resources/beta/vector_stores/files.py         | 4 ++--
 src/openai/resources/beta/vector_stores/vector_stores.py | 4 ++--
 src/openai/resources/chat/completions.py                 | 4 ++--
 src/openai/resources/completions.py                      | 4 ++--
 src/openai/resources/embeddings.py                       | 4 ++--
 src/openai/resources/files.py                            | 4 ++--
 src/openai/resources/fine_tuning/fine_tuning.py          | 4 ++--
 src/openai/resources/fine_tuning/jobs/checkpoints.py     | 4 ++--
 src/openai/resources/fine_tuning/jobs/jobs.py            | 4 ++--
 src/openai/resources/images.py                           | 4 ++--
 src/openai/resources/models.py                           | 4 ++--
 src/openai/resources/moderations.py                      | 4 ++--
 src/openai/resources/uploads/parts.py                    | 4 ++--
 src/openai/resources/uploads/uploads.py                  | 4 ++--
 28 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/src/openai/resources/audio/audio.py b/src/openai/resources/audio/audio.py
index 18bd7b812c..383b7073bf 100644
--- a/src/openai/resources/audio/audio.py
+++ b/src/openai/resources/audio/audio.py
@@ -48,7 +48,7 @@ def speech(self) -> Speech:
     @cached_property
     def with_raw_response(self) -> AudioWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -81,7 +81,7 @@ def speech(self) -> AsyncSpeech:
     @cached_property
     def with_raw_response(self) -> AsyncAudioWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py
index 09faaddda6..805a8c19c9 100644
--- a/src/openai/resources/audio/speech.py
+++ b/src/openai/resources/audio/speech.py
@@ -32,7 +32,7 @@ class Speech(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> SpeechWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -116,7 +116,7 @@ class AsyncSpeech(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncSpeechWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index 8b5f4404fc..341446c43a 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -36,7 +36,7 @@ class Transcriptions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> TranscriptionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -200,7 +200,7 @@ class AsyncTranscriptions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py
index a2d28afa03..cd3132dc57 100644
--- a/src/openai/resources/audio/translations.py
+++ b/src/openai/resources/audio/translations.py
@@ -36,7 +36,7 @@ class Translations(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> TranslationsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -179,7 +179,7 @@ class AsyncTranslations(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index 7cab75785d..4a887642e9 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -31,7 +31,7 @@ class Batches(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> BatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -236,7 +236,7 @@ class AsyncBatches(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncBatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index 7df212f155..2f2482b648 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -36,7 +36,7 @@ class Assistants(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AssistantsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -422,7 +422,7 @@ class AsyncAssistants(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py
index 1ffa6c8e79..5d71cff3f1 100644
--- a/src/openai/resources/beta/beta.py
+++ b/src/openai/resources/beta/beta.py
@@ -65,7 +65,7 @@ def threads(self) -> Threads:
     @cached_property
     def with_raw_response(self) -> BetaWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -106,7 +106,7 @@ def threads(self) -> AsyncThreads:
     @cached_property
     def with_raw_response(self) -> AsyncBetaWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py
index b39b410ecf..235790a9f5 100644
--- a/src/openai/resources/beta/realtime/realtime.py
+++ b/src/openai/resources/beta/realtime/realtime.py
@@ -58,7 +58,7 @@ def sessions(self) -> Sessions:
     @cached_property
     def with_raw_response(self) -> RealtimeWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -110,7 +110,7 @@ def sessions(self) -> AsyncSessions:
     @cached_property
     def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py
index 1d1ee701e5..8d2df30753 100644
--- a/src/openai/resources/beta/realtime/sessions.py
+++ b/src/openai/resources/beta/realtime/sessions.py
@@ -27,7 +27,7 @@ class Sessions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> SessionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -166,7 +166,7 @@ class AsyncSessions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncSessionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index e848507387..f780f6f558 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -33,7 +33,7 @@ class Messages(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> MessagesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -307,7 +307,7 @@ class AsyncMessages(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncMessagesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 0418d570ba..f32a08f235 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -64,7 +64,7 @@ def steps(self) -> Steps:
     @cached_property
     def with_raw_response(self) -> RunsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -1429,7 +1429,7 @@ def steps(self) -> AsyncSteps:
     @cached_property
     def with_raw_response(self) -> AsyncRunsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 9bd91e39e0..709c729d45 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -29,7 +29,7 @@ class Steps(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> StepsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -183,7 +183,7 @@ class AsyncSteps(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncStepsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index e45090abb0..186b6f63e2 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -72,7 +72,7 @@ def messages(self) -> Messages:
     @cached_property
     def with_raw_response(self) -> ThreadsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -906,7 +906,7 @@ def messages(self) -> AsyncMessages:
     @cached_property
     def with_raw_response(self) -> AsyncThreadsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py
index 9f9e643bd0..6d61e92c7f 100644
--- a/src/openai/resources/beta/vector_stores/file_batches.py
+++ b/src/openai/resources/beta/vector_stores/file_batches.py
@@ -36,7 +36,7 @@ class FileBatches(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> FileBatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -365,7 +365,7 @@ class AsyncFileBatches(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncFileBatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py
index 7c155ac917..febf27a753 100644
--- a/src/openai/resources/beta/vector_stores/files.py
+++ b/src/openai/resources/beta/vector_stores/files.py
@@ -32,7 +32,7 @@ class Files(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> FilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -344,7 +344,7 @@ class AsyncFiles(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncFilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py
index 61a2eadc7b..6b44c602f1 100644
--- a/src/openai/resources/beta/vector_stores/vector_stores.py
+++ b/src/openai/resources/beta/vector_stores/vector_stores.py
@@ -59,7 +59,7 @@ def file_batches(self) -> FileBatches:
     @cached_property
     def with_raw_response(self) -> VectorStoresWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -337,7 +337,7 @@ def file_batches(self) -> AsyncFileBatches:
     @cached_property
     def with_raw_response(self) -> AsyncVectorStoresWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index 728c744327..201ae3f4c6 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -45,7 +45,7 @@ class Completions(SyncAPIResource):
     @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -906,7 +906,7 @@ class AsyncCompletions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py
index 1ac3575fd5..171f509352 100644
--- a/src/openai/resources/completions.py
+++ b/src/openai/resources/completions.py
@@ -32,7 +32,7 @@ class Completions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> CompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -574,7 +574,7 @@ class AsyncCompletions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py
index 4ab2278e89..81a3e354e6 100644
--- a/src/openai/resources/embeddings.py
+++ b/src/openai/resources/embeddings.py
@@ -27,7 +27,7 @@ class Embeddings(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> EmbeddingsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -139,7 +139,7 @@ class AsyncEmbeddings(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py
index 6eaea1b568..af453e1e21 100644
--- a/src/openai/resources/files.py
+++ b/src/openai/resources/files.py
@@ -41,7 +41,7 @@ class Files(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> FilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -357,7 +357,7 @@ class AsyncFiles(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncFilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/fine_tuning/fine_tuning.py b/src/openai/resources/fine_tuning/fine_tuning.py
index d2bce87c48..eebde07d81 100644
--- a/src/openai/resources/fine_tuning/fine_tuning.py
+++ b/src/openai/resources/fine_tuning/fine_tuning.py
@@ -24,7 +24,7 @@ def jobs(self) -> Jobs:
     @cached_property
     def with_raw_response(self) -> FineTuningWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -49,7 +49,7 @@ def jobs(self) -> AsyncJobs:
     @cached_property
     def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py
index 8b5e905ea5..f86462e513 100644
--- a/src/openai/resources/fine_tuning/jobs/checkpoints.py
+++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py
@@ -25,7 +25,7 @@ class Checkpoints(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> CheckpointsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -96,7 +96,7 @@ class AsyncCheckpoints(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index 78eefc253c..e023d28fea 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -44,7 +44,7 @@ def checkpoints(self) -> Checkpoints:
     @cached_property
     def with_raw_response(self) -> JobsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -342,7 +342,7 @@ def checkpoints(self) -> AsyncCheckpoints:
     @cached_property
     def with_raw_response(self) -> AsyncJobsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index 2fbc077dd9..30473c14f7 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -30,7 +30,7 @@ class Images(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> ImagesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -287,7 +287,7 @@ class AsyncImages(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncImagesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py
index d6062de230..a9693a6b0a 100644
--- a/src/openai/resources/models.py
+++ b/src/openai/resources/models.py
@@ -24,7 +24,7 @@ class Models(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> ModelsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -137,7 +137,7 @@ class AsyncModels(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncModelsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py
index ce80bb7d55..a8f03142bc 100644
--- a/src/openai/resources/moderations.py
+++ b/src/openai/resources/moderations.py
@@ -28,7 +28,7 @@ class Moderations(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> ModerationsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -98,7 +98,7 @@ class AsyncModerations(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncModerationsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py
index d46e5ea1bb..777469ac8e 100644
--- a/src/openai/resources/uploads/parts.py
+++ b/src/openai/resources/uploads/parts.py
@@ -28,7 +28,7 @@ class Parts(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> PartsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -103,7 +103,7 @@ class AsyncParts(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncPartsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py
index cfb500b62c..2028decef5 100644
--- a/src/openai/resources/uploads/uploads.py
+++ b/src/openai/resources/uploads/uploads.py
@@ -51,7 +51,7 @@ def parts(self) -> Parts:
     @cached_property
     def with_raw_response(self) -> UploadsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -344,7 +344,7 @@ def parts(self) -> AsyncParts:
     @cached_property
     def with_raw_response(self) -> AsyncUploadsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the the
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers From 14543c59df4f56d2004530dfed411154ffc2f632 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 10:22:16 +0000 Subject: [PATCH 15/24] fix(tests): make test_get_platform less flaky (#2040) --- tests/test_client.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/tests/test_client.py b/tests/test_client.py index e0d23403b1..41da2d5d04 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -6,6 +6,7 @@ import os import sys import json +import time import asyncio import inspect import subprocess @@ -1797,10 +1798,20 @@ async def test_main() -> None: [sys.executable, "-c", test_code], text=True, ) as process: - try: - process.wait(2) - if process.returncode: - raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") - except subprocess.TimeoutExpired as e: - process.kill() - raise AssertionError("calling get_platform using asyncify resulted in a hung process") from e + timeout = 10 # seconds + + start_time = time.monotonic() + while True: + return_code = process.poll() + if return_code is not None: + if return_code != 0: + raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") + + # success + break + + if time.monotonic() - start_time > timeout: + process.kill() + raise AssertionError("calling get_platform using asyncify resulted in a hung process") + + time.sleep(0.1) From 7989b045d2b974e03792e3010d5247b5bbe387fe Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:11:31 +0000 Subject: [PATCH 16/24] chore(internal): avoid pytest-asyncio deprecation warning (#2041) --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index a75d24e1eb..e5beb93ec5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -141,6 +141,7 @@ testpaths = ["tests"] addopts = "--tb=short" xfail_strict = true asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "session" filterwarnings = [ "error" ] From 348a783523e36a88e24b92faee693db125efc5bf Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 12:12:04 +0000 Subject: [PATCH 17/24] release: 1.59.9 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 20 ++++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 23 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 58f8a4601d..32b4e18516 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.59.8" + ".": "1.59.9" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f301cedff..86951242c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,25 @@ # Changelog +## 1.59.9 (2025-01-20) + +Full Changelog: [v1.59.8...v1.59.9](https://github.com/openai/openai-python/compare/v1.59.8...v1.59.9) + +### Bug Fixes + +* **tests:** make test_get_platform less flaky ([#2040](https://github.com/openai/openai-python/issues/2040)) ([72ea05c](https://github.com/openai/openai-python/commit/72ea05cf18caaa7a5e6fe7e2251ab93fa0ba3140)) + + +### Chores + +* **internal:** avoid pytest-asyncio deprecation warning 
([#2041](https://github.com/openai/openai-python/issues/2041)) ([b901046](https://github.com/openai/openai-python/commit/b901046ddda9c79b7f019e2263c02d126a3b2ee2)) +* **internal:** update websockets dep ([#2036](https://github.com/openai/openai-python/issues/2036)) ([642cd11](https://github.com/openai/openai-python/commit/642cd119482c6fbca925ba702ad2579f9dc47bf9)) + + +### Documentation + +* fix typo ([#2031](https://github.com/openai/openai-python/issues/2031)) ([02fcf15](https://github.com/openai/openai-python/commit/02fcf15611953089826a74725cb96201d94658bb)) +* **raw responses:** fix duplicate `the` ([#2039](https://github.com/openai/openai-python/issues/2039)) ([9b8eab9](https://github.com/openai/openai-python/commit/9b8eab99fdc6a581a1f5cc421c6f74b0e2b30415)) + ## 1.59.8 (2025-01-17) Full Changelog: [v1.59.7...v1.59.8](https://github.com/openai/openai-python/compare/v1.59.7...v1.59.8) diff --git a/pyproject.toml b/pyproject.toml index e5beb93ec5..0b142a6bd8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.59.8" +version = "1.59.9" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index d6f55997e7..7c92c3adf3 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.59.8" # x-release-please-version +__version__ = "1.59.9" # x-release-please-version From a05e8799f9bb5734b46b19d774d808457c737e31 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 16:08:31 +0000 Subject: [PATCH 18/24] chore(internal): minor style changes (#2043) --- src/openai/_legacy_response.py | 4 ++-- src/openai/_response.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py index 25680049dc..8880e5f104 100644 --- a/src/openai/_legacy_response.py +++ b/src/openai/_legacy_response.py @@ -205,6 +205,8 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to and is_annotated_type(cast_to): cast_to = extract_type_arg(cast_to, 0) + origin = get_origin(cast_to) or cast_to + if self._stream: if to: if not is_stream_class_type(to): @@ -261,8 +263,6 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == bool: return cast(R, response.text.lower() == "true") - origin = get_origin(cast_to) or cast_to - if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent): return cast(R, cast_to(response)) # type: ignore diff --git a/src/openai/_response.py b/src/openai/_response.py index 36c7ea1281..95e94e6537 100644 --- a/src/openai/_response.py +++ b/src/openai/_response.py @@ -136,6 +136,8 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to and is_annotated_type(cast_to): cast_to = extract_type_arg(cast_to, 0) + origin = get_origin(cast_to) or cast_to + if self._is_sse_stream: if to: if not is_stream_class_type(to): @@ -195,8 +197,6 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T: if cast_to == bool: return cast(R, response.text.lower() == "true") - origin = get_origin(cast_to) or cast_to - # handle the legacy binary response case if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent": return cast(R, cast_to(response)) # type: ignore From 
709926fff8761659761c3efb96c1e21a02fc1f5d Mon Sep 17 00:00:00 2001 From: Robert Craigie Date: Wed, 22 Jan 2025 10:43:52 +0000 Subject: [PATCH 19/24] docs(readme): mention failed requests in request IDs --- README.md | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/README.md b/README.md index ec556bd27a..5f7d477cc8 100644 --- a/README.md +++ b/README.md @@ -499,6 +499,21 @@ Note that unlike other properties that use an `_` prefix, the `_request_id` prop *is* public. Unless documented otherwise, *all* other `_` prefix properties, methods and modules are *private*. +> [!IMPORTANT] +> If you need to access request IDs for failed requests you must catch the `APIStatusError` exception + +```python +import openai + +try: + completion = await client.chat.completions.create( + messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4" + ) +except openai.APIStatusError as exc: + print(exc.request_id) # req_123 + raise exc +``` + ### Retries From 339d3151c5b972af690ab45f1055763c52af0e58 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:20:56 +0000 Subject: [PATCH 20/24] feat(api): update enum values, comments, and examples (#2045) --- .stats.yml | 2 +- src/openai/resources/audio/speech.py | 16 +++---- .../resources/beta/realtime/sessions.py | 48 +++++++++++-------- src/openai/resources/chat/completions.py | 18 ------- src/openai/resources/embeddings.py | 6 ++- .../types/audio/speech_create_params.py | 6 +-- .../conversation_item_create_event.py | 11 +++-- .../conversation_item_create_event_param.py | 11 +++-- src/openai/types/beta/realtime/session.py | 13 ++++- .../beta/realtime/session_create_params.py | 35 ++++++++------ .../beta/realtime/session_update_event.py | 33 ++++++++----- .../realtime/session_update_event_param.py | 33 ++++++++----- src/openai/types/chat/chat_completion.py | 6 +-- ...chat_completion_assistant_message_param.py | 4 +- .../types/chat/chat_completion_chunk.py | 6 +-- .../types/chat/completion_create_params.py | 3 -- src/openai/types/embedding_create_params.py | 3 +- .../beta/realtime/test_sessions.py | 28 ++++------- tests/api_resources/chat/test_completions.py | 8 ++-- tests/api_resources/test_completions.py | 8 ++-- 20 files changed, 152 insertions(+), 146 deletions(-) diff --git a/.stats.yml b/.stats.yml index 9600edae3b..d518bac586 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,2 +1,2 @@ configured_endpoints: 69 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 805a8c19c9..ad01118161 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -53,7 +53,7 @@ def create( *, input: str, model: Union[str, SpeechModel], - voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], + voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
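Patch 20 widens the `voice` literal on `audio.speech.create` to also accept `ash`, `coral`, and `sage`. A short sketch exercising one of the new values; the model choice, input text, and output path are assumptions:

```python
from pathlib import Path

from openai import OpenAI

client = OpenAI()

# "ash" is one of the voices newly admitted by the widened Literal above.
with client.audio.speech.with_streaming_response.create(
    model="tts-1",
    voice="ash",
    input="A quick check of the newly supported voice.",
) as response:
    response.stream_to_file(Path("speech.mp3"))  # write the audio bytes to disk
```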
@@ -73,9 +73,9 @@ def create( One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` - voice: The voice to use when generating the audio. Supported voices are `alloy`, - `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - available in the + voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, @@ -137,7 +137,7 @@ async def create( *, input: str, model: Union[str, SpeechModel], - voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], + voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -157,9 +157,9 @@ async def create( One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1` or `tts-1-hd` - voice: The voice to use when generating the audio. Supported voices are `alloy`, - `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are - available in the + voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`, + `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the + voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py index 8d2df30753..b920c89207 100644 --- a/src/openai/resources/beta/realtime/sessions.py +++ b/src/openai/resources/beta/realtime/sessions.py @@ -46,18 +46,19 @@ def with_streaming_response(self) -> SessionsWithStreamingResponse: def create( self, *, + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, model: Literal[ "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", - ], - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, - instructions: str | NotGiven = NOT_GIVEN, - max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + ] + | NotGiven = NOT_GIVEN, output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, tool_choice: str | NotGiven = NOT_GIVEN, @@ -81,9 +82,9 @@ def create( the Realtime API. Args: - model: The Realtime model used for this session. 
- - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + (mono), and little-endian byte order. input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the @@ -110,7 +111,10 @@ def create( modalities: The set of modalities the model can respond with. To disable audio, set this to ["text"]. + model: The Realtime model used for this session. + output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + For `pcm16`, output audio is sampled at a rate of 24kHz. temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. @@ -140,12 +144,12 @@ def create( "/realtime/sessions", body=maybe_transform( { - "model": model, "input_audio_format": input_audio_format, "input_audio_transcription": input_audio_transcription, "instructions": instructions, "max_response_output_tokens": max_response_output_tokens, "modalities": modalities, + "model": model, "output_audio_format": output_audio_format, "temperature": temperature, "tool_choice": tool_choice, @@ -185,18 +189,19 @@ def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse: async def create( self, *, + input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, + input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, + instructions: str | NotGiven = NOT_GIVEN, + max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, + modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, model: Literal[ "gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17", "gpt-4o-mini-realtime-preview", "gpt-4o-mini-realtime-preview-2024-12-17", - ], - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, - input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN, - instructions: str | NotGiven = NOT_GIVEN, - max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN, - modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN, + ] + | NotGiven = NOT_GIVEN, output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, tool_choice: str | NotGiven = NOT_GIVEN, @@ -220,9 +225,9 @@ async def create( the Realtime API. Args: - model: The Realtime model used for this session. - - input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For + `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel + (mono), and little-endian byte order. input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to `null` to turn off once on. Input audio transcription is not native to the @@ -249,7 +254,10 @@ async def create( modalities: The set of modalities the model can respond with. To disable audio, set this to ["text"]. + model: The Realtime model used for this session. + output_audio_format: The format of output audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. + For `pcm16`, output audio is sampled at a rate of 24kHz. temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8. @@ -279,12 +287,12 @@ async def create( "/realtime/sessions", body=await async_maybe_transform( { - "model": model, "input_audio_format": input_audio_format, "input_audio_transcription": input_audio_transcription, "instructions": instructions, "max_response_output_tokens": max_response_output_tokens, "modalities": modalities, + "model": model, "output_audio_format": output_audio_format, "temperature": temperature, "tool_choice": tool_choice, diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py index 201ae3f4c6..a9685c507a 100644 --- a/src/openai/resources/chat/completions.py +++ b/src/openai/resources/chat/completions.py @@ -251,9 +251,6 @@ def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -509,9 +506,6 @@ def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -760,9 +754,6 @@ def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -1112,9 +1103,6 @@ async def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -1370,9 +1358,6 @@ async def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. store: Whether or not to store the output of this chat completion request for use in @@ -1621,9 +1606,6 @@ async def create( tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - When this parameter is set, the response body will include the `service_tier` - utilized. - stop: Up to 4 sequences where the API will stop generating further tokens. 
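The sessions hunks above demote `model` from a required argument to an optional one on `beta.realtime.sessions.create`, matching the reordered keyword in the request-body transform; the updated tests later in this patch call the method with no arguments at all. A sketch of both call shapes, with client construction assumed:

```python
from openai import OpenAI

client = OpenAI()

# `model` may now be omitted; the API applies its own default.
session = client.beta.realtime.sessions.create()

# It can still be pinned explicitly, exactly as before.
session = client.beta.realtime.sessions.create(model="gpt-4o-realtime-preview")
```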
store: Whether or not to store the output of this chat completion request for use in diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index 81a3e354e6..382a42340e 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -68,7 +68,8 @@ def create( `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to @@ -180,7 +181,8 @@ async def create( `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index a60d000708..ed1a1ce748 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -20,11 +20,11 @@ class SpeechCreateParams(TypedDict, total=False): `tts-1` or `tts-1-hd` """ - voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]] + voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]] """The voice to use when generating the audio. - Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. - Previews of the voices are available in the + Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, + `sage` and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py index 50d309675b..c4f72b9aff 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event.py @@ -20,9 +20,10 @@ class ConversationItemCreateEvent(BaseModel): """Optional client-generated ID used to identify this event.""" previous_item_id: Optional[str] = None - """The ID of the preceding item after which the new item will be inserted. - - If not set, the new item will be appended to the end of the conversation. If - set, it allows an item to be inserted mid-conversation. If the ID cannot be - found, an error will be returned and the item will not be added. + """ + The ID of the preceding item after which the new item will be inserted. If not + set, the new item will be appended to the end of the conversation. If set to + `root`, the new item will be added to the beginning of the conversation. If set + to an existing ID, it allows an item to be inserted mid-conversation. If the ID + cannot be found, an error will be returned and the item will not be added. 
""" diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py index b8c8bbc251..6da5a63a9d 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event_param.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -20,9 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False): """Optional client-generated ID used to identify this event.""" previous_item_id: str - """The ID of the preceding item after which the new item will be inserted. - - If not set, the new item will be appended to the end of the conversation. If - set, it allows an item to be inserted mid-conversation. If the ID cannot be - found, an error will be returned and the item will not be added. + """ + The ID of the preceding item after which the new item will be inserted. If not + set, the new item will be appended to the end of the conversation. If set to + `root`, the new item will be added to the beginning of the conversation. If set + to an existing ID, it allows an item to be inserted mid-conversation. If the ID + cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 09cdbb02bc..2d028f817c 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -63,7 +63,12 @@ class Session(BaseModel): """Unique identifier for the session object.""" input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: Optional[InputAudioTranscription] = None """ @@ -117,7 +122,11 @@ class Session(BaseModel): """The Realtime model used for this session.""" output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: Optional[float] = None """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index f56f2c5c22..3708efeecd 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -3,25 +3,19 @@ from __future__ import annotations from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, TypedDict __all__ = ["SessionCreateParams", "InputAudioTranscription", "Tool", "TurnDetection"] class SessionCreateParams(TypedDict, total=False): - model: Required[ - Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - ] - """The Realtime model used for this session.""" - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: InputAudioTranscription """ @@ -61,8 +55,21 @@ class SessionCreateParams(TypedDict, total=False): To disable audio, set this to ["text"]. """ + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: float """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index c04220aa25..322e588a4e 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -65,17 +65,13 @@ class SessionTurnDetection(BaseModel): class Session(BaseModel): - model: Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - """The Realtime model used for this session.""" - input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: Optional[SessionInputAudioTranscription] = None """ @@ -115,8 +111,23 @@ class Session(BaseModel): To disable audio, set this to ["text"]. """ + model: Optional[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] = None + """The Realtime model used for this session.""" + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: Optional[float] = None """Sampling temperature for the model, limited to [0.6, 1.2]. 
Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index aa06069b04..c01d9b6887 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -71,19 +71,13 @@ class SessionTurnDetection(TypedDict, total=False): class Session(TypedDict, total=False): - model: Required[ - Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - ] - """The Realtime model used for this session.""" - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: SessionInputAudioTranscription """ @@ -123,8 +117,21 @@ class Session(TypedDict, total=False): To disable audio, set this to ["text"]. """ + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: float """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 4b53e70890..cb812a2702 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -60,11 +60,7 @@ class ChatCompletion(BaseModel): """The object type, which is always `chat.completion`.""" service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request. - - This field is only included if the `service_tier` parameter is specified in the - request. - """ + """The service tier used for processing the request.""" system_fingerprint: Optional[str] = None """This fingerprint represents the backend configuration that the model runs with. diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 35e3a3d784..229fb822f4 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): """The role of the messages author, in this case `assistant`.""" audio: Optional[Audio] - """Data about a previous audio response from the model. - + """ + Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). 
""" diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 9ec6dc4bdb..7b0ae2e121 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -129,11 +129,7 @@ class ChatCompletionChunk(BaseModel): """The object type, which is always `chat.completion.chunk`.""" service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request. - - This field is only included if the `service_tier` parameter is specified in the - request. - """ + """The service tier used for processing the request.""" system_fingerprint: Optional[str] = None """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index f168ddea6e..30d930b120 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -220,9 +220,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` - utilized. """ stop: Union[Optional[str], List[str]] diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index 1385762885..a90566449b 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -19,7 +19,8 @@ class EmbeddingCreateParams(TypedDict, total=False): (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. 
""" model: Required[Union[str, EmbeddingModel]] diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index 65bfa27572..908aa983be 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -19,20 +19,18 @@ class TestSessions: @parametrize def test_method_create(self, client: OpenAI) -> None: - session = client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", - ) + session = client.beta.realtime.sessions.create() assert_matches_type(SessionCreateResponse, session, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: session = client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", input_audio_format="pcm16", input_audio_transcription={"model": "model"}, instructions="instructions", max_response_output_tokens=0, modalities=["text"], + model="gpt-4o-realtime-preview", output_audio_format="pcm16", temperature=0, tool_choice="tool_choice", @@ -57,9 +55,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: - response = client.beta.realtime.sessions.with_raw_response.create( - model="gpt-4o-realtime-preview", - ) + response = client.beta.realtime.sessions.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -68,9 +64,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: - with client.beta.realtime.sessions.with_streaming_response.create( - model="gpt-4o-realtime-preview", - ) as response: + with client.beta.realtime.sessions.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -85,20 +79,18 @@ class TestAsyncSessions: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: - session = await async_client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", - ) + session = await async_client.beta.realtime.sessions.create() assert_matches_type(SessionCreateResponse, session, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: session = await async_client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", input_audio_format="pcm16", input_audio_transcription={"model": "model"}, instructions="instructions", max_response_output_tokens=0, modalities=["text"], + model="gpt-4o-realtime-preview", output_audio_format="pcm16", temperature=0, tool_choice="tool_choice", @@ -123,9 +115,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.realtime.sessions.with_raw_response.create( - model="gpt-4o-realtime-preview", - ) + response = await async_client.beta.realtime.sessions.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,9 +124,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: - async with 
async_client.beta.realtime.sessions.with_streaming_response.create( - model="gpt-4o-realtime-preview", - ) as response: + async with async_client.beta.realtime.sessions.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 393a790549..25c9a36164 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -72,7 +72,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -187,7 +187,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -321,7 +321,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -436,7 +436,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index ad2679cabe..9ec503c1e3 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -38,7 +38,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream_options={"include_usage": True}, suffix="test.", @@ -160,7 +160,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -220,7 +220,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream_options={"include_usage": True}, suffix="test.", From c111dab6af1bb40e1f8768c9941dc7c292293e59 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:22:04 +0000 Subject: [PATCH 21/24] release: 1.60.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 21 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 32b4e18516..88c2f64985 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.59.9" + ".": "1.60.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 86951242c5..9f90d3dd22 
100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 1.60.0 (2025-01-22) + +Full Changelog: [v1.59.9...v1.60.0](https://github.com/openai/openai-python/compare/v1.59.9...v1.60.0) + +### Features + +* **api:** update enum values, comments, and examples ([#2045](https://github.com/openai/openai-python/issues/2045)) ([e8205fd](https://github.com/openai/openai-python/commit/e8205fd58f0d677f476c577a8d9afb90f5710506)) + + +### Chores + +* **internal:** minor style changes ([#2043](https://github.com/openai/openai-python/issues/2043)) ([89a9dd8](https://github.com/openai/openai-python/commit/89a9dd821eaf5300ad11b0270b61fdfa4fd6e9b6)) + + +### Documentation + +* **readme:** mention failed requests in request IDs ([5f7c30b](https://github.com/openai/openai-python/commit/5f7c30bc006ffb666c324011a68aae357cb33e35)) + ## 1.59.9 (2025-01-20) Full Changelog: [v1.59.8...v1.59.9](https://github.com/openai/openai-python/compare/v1.59.8...v1.59.9) diff --git a/pyproject.toml b/pyproject.toml index 0b142a6bd8..80c79ff585 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.59.9" +version = "1.60.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 7c92c3adf3..ef0ddcfc5c 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.59.9" # x-release-please-version +__version__ = "1.60.0" # x-release-please-version From abc5459c7504eec25a67b35104e2e09e7d8f232c Mon Sep 17 00:00:00 2001 From: Fernando de Oliveira <5161098+fedeoliv@users.noreply.github.com> Date: Wed, 22 Jan 2025 16:44:26 -0500 Subject: [PATCH 22/24] docs(examples/azure): add async snippet (#1787) --- examples/azure_ad.py | 79 ++++++++++++++++++++++++++++++++------------ 1 file changed, 58 insertions(+), 21 deletions(-) diff --git a/examples/azure_ad.py b/examples/azure_ad.py index 1b0d81863d..67e2f23713 100755 --- a/examples/azure_ad.py +++ b/examples/azure_ad.py @@ -1,30 +1,67 @@ -from azure.identity import DefaultAzureCredential, get_bearer_token_provider +import asyncio -from openai import AzureOpenAI +from openai.lib.azure import AzureOpenAI, AsyncAzureOpenAI, AzureADTokenProvider, AsyncAzureADTokenProvider -token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") +scopes = "https://cognitiveservices.azure.com/.default" - -# may change in the future +# May change in the future # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning api_version = "2023-07-01-preview" # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource endpoint = "https://my-resource.openai.azure.com" -client = AzureOpenAI( - api_version=api_version, - azure_endpoint=endpoint, - azure_ad_token_provider=token_provider, -) - -completion = client.chat.completions.create( - model="deployment-name", # e.g. gpt-35-instant - messages=[ - { - "role": "user", - "content": "How do I output all files in a directory using Python?", - }, - ], -) -print(completion.to_json()) +deployment_name = "deployment-name" # e.g. 
gpt-35-instant + + +def sync_main() -> None: + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + token_provider: AzureADTokenProvider = get_bearer_token_provider(DefaultAzureCredential(), scopes) + + client = AzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + azure_ad_token_provider=token_provider, + ) + + completion = client.chat.completions.create( + model=deployment_name, + messages=[ + { + "role": "user", + "content": "How do I output all files in a directory using Python?", + } + ], + ) + + print(completion.to_json()) + + +async def async_main() -> None: + from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider + + token_provider: AsyncAzureADTokenProvider = get_bearer_token_provider(DefaultAzureCredential(), scopes) + + client = AsyncAzureOpenAI( + api_version=api_version, + azure_endpoint=endpoint, + azure_ad_token_provider=token_provider, + ) + + completion = await client.chat.completions.create( + model=deployment_name, + messages=[ + { + "role": "user", + "content": "How do I output all files in a directory using Python?", + } + ], + ) + + print(completion.to_json()) + + +sync_main() + +asyncio.run(async_main()) From 27d0e67b1d121ccc5b48c95e1f0bc3f6e93e9bd3 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:39:04 +0000 Subject: [PATCH 23/24] chore(internal): minor formatting changes (#2050) --- .github/workflows/ci.yml | 1 + scripts/bootstrap | 2 +- scripts/lint | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index de70348b9c..26f497db1f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,6 +29,7 @@ jobs: - name: Run lints run: ./scripts/lint + test: name: test runs-on: ubuntu-latest diff --git a/scripts/bootstrap b/scripts/bootstrap index 29df07e77b..9910ec05fc 100755 --- a/scripts/bootstrap +++ b/scripts/bootstrap @@ -4,7 +4,7 @@ set -e cd "$(dirname "$0")/.." -if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then +if ! 
command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then brew bundle check >/dev/null 2>&1 || { echo "==> Installing Homebrew dependencies…" brew bundle diff --git a/scripts/lint b/scripts/lint index 64495ee345..55bc1dd711 100755 --- a/scripts/lint +++ b/scripts/lint @@ -9,4 +9,3 @@ rye run lint echo "==> Making sure it imports" rye run python -c 'import openai' - From b95be16e7c8a76c3d63335df13ab0d55ba3d5c35 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 24 Jan 2025 05:04:08 +0000 Subject: [PATCH 24/24] release: 1.60.1 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 88c2f64985..0b39405429 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.60.0" + ".": "1.60.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f90d3dd22..873a6a254d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.60.1 (2025-01-24) + +Full Changelog: [v1.60.0...v1.60.1](https://github.com/openai/openai-python/compare/v1.60.0...v1.60.1) + +### Chores + +* **internal:** minor formatting changes ([#2050](https://github.com/openai/openai-python/issues/2050)) ([9c44192](https://github.com/openai/openai-python/commit/9c44192be5776d9252d36dc027a33c60b33d81b2)) + + +### Documentation + +* **examples/azure:** add async snippet ([#1787](https://github.com/openai/openai-python/issues/1787)) ([f60eda1](https://github.com/openai/openai-python/commit/f60eda1c1e8caf0ec2274b18b3fb2252304196db)) + ## 1.60.0 (2025-01-22) Full Changelog: [v1.59.9...v1.60.0](https://github.com/openai/openai-python/compare/v1.59.9...v1.60.0) diff --git a/pyproject.toml b/pyproject.toml index 80c79ff585..fdfc9c73e3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.60.0" +version = "1.60.1" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index ef0ddcfc5c..b87aa8abd4 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.60.0" # x-release-please-version +__version__ = "1.60.1" # x-release-please-version
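The two release commits above only rewrite version metadata; the `# x-release-please-version` comments mark the lines release-please edits in place. A one-liner to confirm an installed build matches the bumped manifest — assuming the package re-exports `__version__` from `_version.py`, as this layout does:

```python
import openai

# `__version__` originates in src/openai/_version.py, the file patched above.
assert openai.__version__ == "1.60.1"
```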