diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index de70348b9c..26f497db1f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -29,6 +29,7 @@ jobs:
       - name: Run lints
         run: ./scripts/lint
 
+
   test:
     name: test
     runs-on: ubuntu-latest
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 7da3bd4caf..0b39405429 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.59.7"
+  ".": "1.60.1"
 }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 9600edae3b..d518bac586 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 69
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-b5b0e2c794b012919701c3fd43286af10fa25d33ceb8a881bec2636028f446e0.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai-3904ef6b29a89c98f93a9b7da19879695f3c440564be6384db7af1b734611ede.yml
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 08674b4a36..873a6a254d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,80 @@
 # Changelog
 
+## 1.60.1 (2025-01-24)
+
+Full Changelog: [v1.60.0...v1.60.1](https://github.com/openai/openai-python/compare/v1.60.0...v1.60.1)
+
+### Chores
+
+* **internal:** minor formatting changes ([#2050](https://github.com/openai/openai-python/issues/2050)) ([9c44192](https://github.com/openai/openai-python/commit/9c44192be5776d9252d36dc027a33c60b33d81b2))
+
+
+### Documentation
+
+* **examples/azure:** add async snippet ([#1787](https://github.com/openai/openai-python/issues/1787)) ([f60eda1](https://github.com/openai/openai-python/commit/f60eda1c1e8caf0ec2274b18b3fb2252304196db))
+
+## 1.60.0 (2025-01-22)
+
+Full Changelog: [v1.59.9...v1.60.0](https://github.com/openai/openai-python/compare/v1.59.9...v1.60.0)
+
+### Features
+
+* **api:** update enum values, comments, and examples ([#2045](https://github.com/openai/openai-python/issues/2045)) ([e8205fd](https://github.com/openai/openai-python/commit/e8205fd58f0d677f476c577a8d9afb90f5710506))
+
+
+### Chores
+
+* **internal:** minor style changes ([#2043](https://github.com/openai/openai-python/issues/2043)) ([89a9dd8](https://github.com/openai/openai-python/commit/89a9dd821eaf5300ad11b0270b61fdfa4fd6e9b6))
+
+
+### Documentation
+
+* **readme:** mention failed requests in request IDs ([5f7c30b](https://github.com/openai/openai-python/commit/5f7c30bc006ffb666c324011a68aae357cb33e35))
+
+## 1.59.9 (2025-01-20)
+
+Full Changelog: [v1.59.8...v1.59.9](https://github.com/openai/openai-python/compare/v1.59.8...v1.59.9)
+
+### Bug Fixes
+
+* **tests:** make test_get_platform less flaky ([#2040](https://github.com/openai/openai-python/issues/2040)) ([72ea05c](https://github.com/openai/openai-python/commit/72ea05cf18caaa7a5e6fe7e2251ab93fa0ba3140))
+
+
+### Chores
+
+* **internal:** avoid pytest-asyncio deprecation warning ([#2041](https://github.com/openai/openai-python/issues/2041)) ([b901046](https://github.com/openai/openai-python/commit/b901046ddda9c79b7f019e2263c02d126a3b2ee2))
+* **internal:** update websockets dep ([#2036](https://github.com/openai/openai-python/issues/2036)) ([642cd11](https://github.com/openai/openai-python/commit/642cd119482c6fbca925ba702ad2579f9dc47bf9))
+
+
+### Documentation
+
+* fix typo ([#2031](https://github.com/openai/openai-python/issues/2031)) ([02fcf15](https://github.com/openai/openai-python/commit/02fcf15611953089826a74725cb96201d94658bb))
+* **raw responses:** fix duplicate `the` ([#2039](https://github.com/openai/openai-python/issues/2039)) ([9b8eab9](https://github.com/openai/openai-python/commit/9b8eab99fdc6a581a1f5cc421c6f74b0e2b30415))
+
+## 1.59.8 (2025-01-17)
+
+Full Changelog: [v1.59.7...v1.59.8](https://github.com/openai/openai-python/compare/v1.59.7...v1.59.8)
+
+### Bug Fixes
+
+* streaming ([c16f58e](https://github.com/openai/openai-python/commit/c16f58ead0bc85055b164182689ba74b7e939dfa))
+* **structured outputs:** avoid parsing empty content ([#2023](https://github.com/openai/openai-python/issues/2023)) ([6d3513c](https://github.com/openai/openai-python/commit/6d3513c86f6e5800f8f73a45e089b7a205327121))
+* **structured outputs:** correct schema coercion for inline ref expansion ([#2025](https://github.com/openai/openai-python/issues/2025)) ([2f4f0b3](https://github.com/openai/openai-python/commit/2f4f0b374207f162060c328b71ec995049dc42e8))
+* **types:** correct type for vector store chunking strategy ([#2017](https://github.com/openai/openai-python/issues/2017)) ([e389279](https://github.com/openai/openai-python/commit/e38927950a5cdad99065853fe7b72aad6bb322e9))
+
+
+### Chores
+
+* **examples:** update realtime model ([f26746c](https://github.com/openai/openai-python/commit/f26746cbcd893d66cf8a3fd68a7c3779dc8c833c)), closes [#2020](https://github.com/openai/openai-python/issues/2020)
+* **internal:** bump pyright dependency ([#2021](https://github.com/openai/openai-python/issues/2021)) ([0a9a0f5](https://github.com/openai/openai-python/commit/0a9a0f5d8b9d5457643798287f893305006dd518))
+* **internal:** streaming refactors ([#2012](https://github.com/openai/openai-python/issues/2012)) ([d76a748](https://github.com/openai/openai-python/commit/d76a748f606743407f94dfc26758095560e2082a))
+* **internal:** update deps ([#2015](https://github.com/openai/openai-python/issues/2015)) ([514e0e4](https://github.com/openai/openai-python/commit/514e0e415f87ab4510262d29ed6125384e017b84))
+
+
+### Documentation
+
+* **examples/azure:** example script with realtime API ([#1967](https://github.com/openai/openai-python/issues/1967)) ([84f2f9c](https://github.com/openai/openai-python/commit/84f2f9c0439229a7db7136fe78419292d34d1f81))
+
 ## 1.59.7 (2025-01-13)
 
 Full Changelog: [v1.59.6...v1.59.7](https://github.com/openai/openai-python/compare/v1.59.6...v1.59.7)
diff --git a/README.md b/README.md
index 907ce9adc3..23e49ac9d1 100644
--- a/README.md
+++ b/README.md
@@ -275,7 +275,7 @@ from openai import AsyncOpenAI
 async def main():
     client = AsyncOpenAI()
 
-    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
+    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
         await connection.session.update(session={'modalities': ['text']})
 
         await connection.conversation.item.create(
@@ -309,7 +309,7 @@ Whenever an error occurs, the Realtime API will send an [`error` event](https://
 ```py
 client = AsyncOpenAI()
 
-async with client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as connection:
+async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
     ...
     async for event in connection:
        if event.type == 'error':
@@ -499,6 +499,21 @@ Note that unlike other properties that use an `_` prefix, the `_request_id` prop
 *is* public. Unless documented otherwise, *all* other `_` prefix properties,
 methods and modules are *private*.
 
+> [!IMPORTANT]
+> If you need to access request IDs for failed requests, you must catch the `APIStatusError` exception
+
+```python
+import openai
+
+try:
+    completion = await client.chat.completions.create(
+        messages=[{"role": "user", "content": "Say this is a test"}], model="gpt-4"
+    )
+except openai.APIStatusError as exc:
+    print(exc.request_id)  # req_123
+    raise exc
+```
+
 ### Retries
 
diff --git a/api.md b/api.md
index ace93e0559..1edd3f6589 100644
--- a/api.md
+++ b/api.md
@@ -314,7 +314,7 @@ from openai.types.beta import (
     OtherFileChunkingStrategyObject,
     StaticFileChunkingStrategy,
     StaticFileChunkingStrategyObject,
-    StaticFileChunkingStrategyParam,
+    StaticFileChunkingStrategyObjectParam,
     VectorStore,
     VectorStoreDeleted,
 )
diff --git a/examples/azure_ad.py b/examples/azure_ad.py
index 1b0d81863d..67e2f23713 100755
--- a/examples/azure_ad.py
+++ b/examples/azure_ad.py
@@ -1,30 +1,67 @@
-from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+import asyncio
 
-from openai import AzureOpenAI
+from openai.lib.azure import AzureOpenAI, AsyncAzureOpenAI, AzureADTokenProvider, AsyncAzureADTokenProvider
 
-token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default")
+scopes = "https://cognitiveservices.azure.com/.default"
 
-
-# may change in the future
+# May change in the future
 # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
 api_version = "2023-07-01-preview"
 
 # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
 endpoint = "https://my-resource.openai.azure.com"
 
-client = AzureOpenAI(
-    api_version=api_version,
-    azure_endpoint=endpoint,
-    azure_ad_token_provider=token_provider,
-)
-
-completion = client.chat.completions.create(
-    model="deployment-name",  # e.g. gpt-35-instant
-    messages=[
-        {
-            "role": "user",
-            "content": "How do I output all files in a directory using Python?",
-        },
-    ],
-)
-print(completion.to_json())
+deployment_name = "deployment-name"  # e.g. gpt-35-instant
+
+
+def sync_main() -> None:
+    from azure.identity import DefaultAzureCredential, get_bearer_token_provider
+
+    token_provider: AzureADTokenProvider = get_bearer_token_provider(DefaultAzureCredential(), scopes)
+
+    client = AzureOpenAI(
+        api_version=api_version,
+        azure_endpoint=endpoint,
+        azure_ad_token_provider=token_provider,
+    )
+
+    completion = client.chat.completions.create(
+        model=deployment_name,
+        messages=[
+            {
+                "role": "user",
+                "content": "How do I output all files in a directory using Python?",
+            }
+        ],
+    )
+
+    print(completion.to_json())
+
+
+async def async_main() -> None:
+    from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
+
+    token_provider: AsyncAzureADTokenProvider = get_bearer_token_provider(DefaultAzureCredential(), scopes)
+
+    client = AsyncAzureOpenAI(
+        api_version=api_version,
+        azure_endpoint=endpoint,
+        azure_ad_token_provider=token_provider,
+    )
+
+    completion = await client.chat.completions.create(
+        model=deployment_name,
+        messages=[
+            {
+                "role": "user",
+                "content": "How do I output all files in a directory using Python?",
+            }
+        ],
+    )
+
+    print(completion.to_json())
+
+
+sync_main()
+
+asyncio.run(async_main())
diff --git a/examples/realtime/azure_realtime.py b/examples/realtime/azure_realtime.py
new file mode 100644
index 0000000000..de88d47052
--- /dev/null
+++ b/examples/realtime/azure_realtime.py
@@ -0,0 +1,57 @@
+import os
+import asyncio
+
+from azure.identity.aio import DefaultAzureCredential, get_bearer_token_provider
+
+from openai import AsyncAzureOpenAI
+
+# Azure OpenAI Realtime Docs
+
+# How-to: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio
+# Supported models and API versions: https://learn.microsoft.com/azure/ai-services/openai/how-to/realtime-audio#supported-models
+# Entra ID auth: https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity
+
+
+async def main() -> None:
+    """The following example demonstrates how to configure Azure OpenAI to use the Realtime API.
+    For an audio example, see push_to_talk_app.py and update the client and model parameter accordingly.
+
+    When prompted for user input, type a message and hit enter to send it to the model.
+    Enter "q" to quit the conversation.
+ """ + + credential = DefaultAzureCredential() + client = AsyncAzureOpenAI( + azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"], + azure_ad_token_provider=get_bearer_token_provider(credential, "https://cognitiveservices.azure.com/.default"), + api_version="2024-10-01-preview", + ) + async with client.beta.realtime.connect( + model="gpt-4o-realtime-preview", # deployment name for your model + ) as connection: + await connection.session.update(session={"modalities": ["text"]}) # type: ignore + while True: + user_input = input("Enter a message: ") + if user_input == "q": + break + + await connection.conversation.item.create( + item={ + "type": "message", + "role": "user", + "content": [{"type": "input_text", "text": user_input}], + } + ) + await connection.response.create() + async for event in connection: + if event.type == "response.text.delta": + print(event.delta, flush=True, end="") + elif event.type == "response.text.done": + print() + elif event.type == "response.done": + break + + await credential.close() + + +asyncio.run(main()) diff --git a/examples/realtime/push_to_talk_app.py b/examples/realtime/push_to_talk_app.py index d46945a8ed..8dc303a83a 100755 --- a/examples/realtime/push_to_talk_app.py +++ b/examples/realtime/push_to_talk_app.py @@ -152,7 +152,7 @@ async def on_mount(self) -> None: self.run_worker(self.send_mic_audio()) async def handle_realtime_connection(self) -> None: - async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview-2024-10-01") as conn: + async with self.client.beta.realtime.connect(model="gpt-4o-realtime-preview") as conn: self.connection = conn self.connected.set() diff --git a/mypy.ini b/mypy.ini index 1ea1fe909d..660f1a086e 100644 --- a/mypy.ini +++ b/mypy.ini @@ -44,7 +44,7 @@ cache_fine_grained = True # ``` # Changing this codegen to make mypy happy would increase complexity # and would not be worth it. 
-disable_error_code = func-returns-value
+disable_error_code = func-returns-value,overload-cannot-match
 
 # https://github.com/python/mypy/issues/12162
 [mypy.overrides]
diff --git a/pyproject.toml b/pyproject.toml
index a47eadec1c..63f4f41fc6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.59.7"
+version = "1.60.1"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
@@ -142,6 +142,7 @@ testpaths = ["tests"]
 addopts = "--tb=short"
 xfail_strict = true
 asyncio_mode = "auto"
+asyncio_default_fixture_loop_scope = "session"
 filterwarnings = [
   "error"
 ]
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 15ecbf081a..38cc6e1cf2 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -83,7 +83,7 @@ msal==1.31.0
     # via msal-extensions
 msal-extensions==1.2.0
     # via azure-identity
-mypy==1.13.0
+mypy==1.14.1
 mypy-extensions==1.0.0
     # via black
     # via mypy
@@ -124,7 +124,7 @@ pygments==2.18.0
     # via rich
 pyjwt==2.8.0
     # via msal
-pyright==1.1.390
+pyright==1.1.392.post0
 pytest==8.3.3
     # via pytest-asyncio
 pytest-asyncio==0.24.0
@@ -184,7 +184,7 @@ urllib3==2.2.1
     # via requests
 virtualenv==20.24.5
     # via nox
-websockets==14.1
+websockets==14.2
     # via openai
 zipp==3.17.0
     # via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index a3e3602abe..cbdff94fa3 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -63,5 +63,5 @@ typing-extensions==4.12.2
     # via pydantic-core
 tzdata==2024.1
     # via pandas
-websockets==14.1
+websockets==14.2
     # via openai
diff --git a/scripts/bootstrap b/scripts/bootstrap
index 29df07e77b..9910ec05fc 100755
--- a/scripts/bootstrap
+++ b/scripts/bootstrap
@@ -4,7 +4,7 @@ set -e
 
 cd "$(dirname "$0")/.."
 
-if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
+if ! command -v rye >/dev/null 2>&1 && [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then
   brew bundle check >/dev/null 2>&1 || {
     echo "==> Installing Homebrew dependencies…"
     brew bundle
diff --git a/scripts/lint b/scripts/lint
index 64495ee345..55bc1dd711 100755
--- a/scripts/lint
+++ b/scripts/lint
@@ -9,4 +9,3 @@ rye run lint
 
 echo "==> Making sure it imports"
 rye run python -c 'import openai'
-
diff --git a/src/openai/_legacy_response.py b/src/openai/_legacy_response.py
index 7a14f27adb..8880e5f104 100644
--- a/src/openai/_legacy_response.py
+++ b/src/openai/_legacy_response.py
@@ -205,6 +205,8 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
         if cast_to and is_annotated_type(cast_to):
             cast_to = extract_type_arg(cast_to, 0)
 
+        origin = get_origin(cast_to) or cast_to
+
         if self._stream:
             if to:
                 if not is_stream_class_type(to):
@@ -261,15 +263,15 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
         if cast_to == bool:
             return cast(R, response.text.lower() == "true")
 
-        origin = get_origin(cast_to) or cast_to
-
         if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent):
             return cast(R, cast_to(response))  # type: ignore
 
         if origin == LegacyAPIResponse:
             raise RuntimeError("Unexpected state - cast_to is `APIResponse`")
 
-        if inspect.isclass(origin) and issubclass(origin, httpx.Response):
+        if inspect.isclass(
+            origin  # pyright: ignore[reportUnknownArgumentType]
+        ) and issubclass(origin, httpx.Response):
             # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response
             # and pass that class to our request functions. We cannot change the variance to be either
             # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct
@@ -279,7 +281,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
                 raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
             return cast(R, response)
 
-        if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
             raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
 
         if (
diff --git a/src/openai/_response.py b/src/openai/_response.py
index 1527446585..95e94e6537 100644
--- a/src/openai/_response.py
+++ b/src/openai/_response.py
@@ -136,6 +136,8 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
         if cast_to and is_annotated_type(cast_to):
             cast_to = extract_type_arg(cast_to, 0)
 
+        origin = get_origin(cast_to) or cast_to
+
         if self._is_sse_stream:
             if to:
                 if not is_stream_class_type(to):
@@ -195,8 +197,6 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
         if cast_to == bool:
             return cast(R, response.text.lower() == "true")
 
-        origin = get_origin(cast_to) or cast_to
-
         # handle the legacy binary response case
         if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent":
             return cast(R, cast_to(response))  # type: ignore
@@ -214,7 +214,13 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
                 raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`")
             return cast(R, response)
 
-        if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel):
+        if (
+            inspect.isclass(
+                origin  # pyright: ignore[reportUnknownArgumentType]
+            )
+            and not issubclass(origin, BaseModel)
+            and issubclass(origin, pydantic.BaseModel)
+        ):
             raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`")
 
         if (
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 656d17ff63..b87aa8abd4 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai" -__version__ = "1.59.7" # x-release-please-version +__version__ = "1.60.1" # x-release-please-version diff --git a/src/openai/lib/_parsing/_completions.py b/src/openai/lib/_parsing/_completions.py index f1fa9f2b55..33c4ccb946 100644 --- a/src/openai/lib/_parsing/_completions.py +++ b/src/openai/lib/_parsing/_completions.py @@ -157,7 +157,7 @@ def maybe_parse_content( response_format: type[ResponseFormatT] | ResponseFormatParam | NotGiven, message: ChatCompletionMessage | ParsedChatCompletionMessage[object], ) -> ResponseFormatT | None: - if has_rich_response_format(response_format) and message.content is not None and not message.refusal: + if has_rich_response_format(response_format) and message.content and not message.refusal: return _parse_content(response_format, message.content) return None diff --git a/src/openai/lib/_pydantic.py b/src/openai/lib/_pydantic.py index 4e8bc772be..c2d73e5fc6 100644 --- a/src/openai/lib/_pydantic.py +++ b/src/openai/lib/_pydantic.py @@ -108,6 +108,9 @@ def _ensure_strict_json_schema( # properties from the json schema take priority over the ones on the `$ref` json_schema.update({**resolved, **json_schema}) json_schema.pop("$ref") + # Since the schema expanded from `$ref` might not have `additionalProperties: false` applied, + # we call `_ensure_strict_json_schema` again to fix the inlined schema and ensure it's valid. + return _ensure_strict_json_schema(json_schema, path=path, root=root) return json_schema diff --git a/src/openai/resources/audio/audio.py b/src/openai/resources/audio/audio.py index 18bd7b812c..383b7073bf 100644 --- a/src/openai/resources/audio/audio.py +++ b/src/openai/resources/audio/audio.py @@ -48,7 +48,7 @@ def speech(self) -> Speech: @cached_property def with_raw_response(self) -> AudioWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -81,7 +81,7 @@ def speech(self) -> AsyncSpeech: @cached_property def with_raw_response(self) -> AsyncAudioWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py index 09faaddda6..ad01118161 100644 --- a/src/openai/resources/audio/speech.py +++ b/src/openai/resources/audio/speech.py @@ -32,7 +32,7 @@ class Speech(SyncAPIResource): @cached_property def with_raw_response(self) -> SpeechWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -53,7 +53,7 @@ def create(
         self,
         *,
         input: str,
         model: Union[str, SpeechModel],
-        voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
+        voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -73,9 +73,9 @@ def create(
               One of the available [TTS models](https://platform.openai.com/docs/models#tts):
               `tts-1` or `tts-1-hd`
 
-          voice: The voice to use when generating the audio. Supported voices are `alloy`,
-              `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
-              available in the
+          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
+              `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
+              voices are available in the
               [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
 
           response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
@@ -116,7 +116,7 @@ class AsyncSpeech(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncSpeechWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -137,7 +137,7 @@ async def create(
         self,
         *,
         input: str,
         model: Union[str, SpeechModel],
-        voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
+        voice: Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"],
         response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
         speed: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -157,9 +157,9 @@ async def create(
               One of the available [TTS models](https://platform.openai.com/docs/models#tts):
               `tts-1` or `tts-1-hd`
 
-          voice: The voice to use when generating the audio. Supported voices are `alloy`,
-              `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
-              available in the
+          voice: The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
+              `coral`, `echo`, `fable`, `onyx`, `nova`, `sage` and `shimmer`. Previews of the
+              voices are available in the
               [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
 
           response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index 8b5f4404fc..341446c43a 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -36,7 +36,7 @@ class Transcriptions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> TranscriptionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -200,7 +200,7 @@ class AsyncTranscriptions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py
index a2d28afa03..cd3132dc57 100644
--- a/src/openai/resources/audio/translations.py
+++ b/src/openai/resources/audio/translations.py
@@ -36,7 +36,7 @@ class Translations(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> TranslationsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -179,7 +179,7 @@ class AsyncTranslations(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index 7cab75785d..4a887642e9 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -31,7 +31,7 @@ class Batches(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> BatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -236,7 +236,7 @@ class AsyncBatches(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncBatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index 7df212f155..2f2482b648 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -36,7 +36,7 @@ class Assistants(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AssistantsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -422,7 +422,7 @@ class AsyncAssistants(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py
index 1ffa6c8e79..5d71cff3f1 100644
--- a/src/openai/resources/beta/beta.py
+++ b/src/openai/resources/beta/beta.py
@@ -65,7 +65,7 @@ def threads(self) -> Threads:
     @cached_property
     def with_raw_response(self) -> BetaWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -106,7 +106,7 @@ def threads(self) -> AsyncThreads:
     @cached_property
     def with_raw_response(self) -> AsyncBetaWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py
index b39b410ecf..235790a9f5 100644
--- a/src/openai/resources/beta/realtime/realtime.py
+++ b/src/openai/resources/beta/realtime/realtime.py
@@ -58,7 +58,7 @@ def sessions(self) -> Sessions:
     @cached_property
     def with_raw_response(self) -> RealtimeWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -110,7 +110,7 @@ def sessions(self) -> AsyncSessions:
     @cached_property
     def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py
index 1d1ee701e5..b920c89207 100644
--- a/src/openai/resources/beta/realtime/sessions.py
+++ b/src/openai/resources/beta/realtime/sessions.py
@@ -27,7 +27,7 @@ class Sessions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> SessionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -46,18 +46,19 @@ def with_streaming_response(self) -> SessionsWithStreamingResponse:
     def create(
         self,
         *,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        instructions: str | NotGiven = NOT_GIVEN,
+        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
         model: Literal[
             "gpt-4o-realtime-preview",
             "gpt-4o-realtime-preview-2024-10-01",
             "gpt-4o-realtime-preview-2024-12-17",
             "gpt-4o-mini-realtime-preview",
             "gpt-4o-mini-realtime-preview-2024-12-17",
-        ],
-        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
-        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
-        instructions: str | NotGiven = NOT_GIVEN,
-        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
-        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        ]
+        | NotGiven = NOT_GIVEN,
         output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
@@ -81,9 +82,9 @@ def create(
           the Realtime API.
 
           Args:
-          model: The Realtime model used for this session.
-
-          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
 
           input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
               `null` to turn off once on. Input audio transcription is not native to the
@@ -110,7 +111,10 @@ def create(
           modalities: The set of modalities the model can respond with. To disable audio, set this to
              ["text"].
 
+          model: The Realtime model used for this session.
+
           output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              For `pcm16`, output audio is sampled at a rate of 24kHz.
 
           temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
 
@@ -140,12 +144,12 @@ def create(
             "/realtime/sessions",
             body=maybe_transform(
                 {
-                    "model": model,
                     "input_audio_format": input_audio_format,
                     "input_audio_transcription": input_audio_transcription,
                     "instructions": instructions,
                     "max_response_output_tokens": max_response_output_tokens,
                     "modalities": modalities,
+                    "model": model,
                     "output_audio_format": output_audio_format,
                     "temperature": temperature,
                     "tool_choice": tool_choice,
@@ -166,7 +170,7 @@ class AsyncSessions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncSessionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -185,18 +189,19 @@ def with_streaming_response(self) -> AsyncSessionsWithStreamingResponse:
     async def create(
         self,
         *,
+        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
+        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
+        instructions: str | NotGiven = NOT_GIVEN,
+        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
+        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
         model: Literal[
             "gpt-4o-realtime-preview",
             "gpt-4o-realtime-preview-2024-10-01",
             "gpt-4o-realtime-preview-2024-12-17",
             "gpt-4o-mini-realtime-preview",
             "gpt-4o-mini-realtime-preview-2024-12-17",
-        ],
-        input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
-        input_audio_transcription: session_create_params.InputAudioTranscription | NotGiven = NOT_GIVEN,
-        instructions: str | NotGiven = NOT_GIVEN,
-        max_response_output_tokens: Union[int, Literal["inf"]] | NotGiven = NOT_GIVEN,
-        modalities: List[Literal["text", "audio"]] | NotGiven = NOT_GIVEN,
+        ]
+        | NotGiven = NOT_GIVEN,
         output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] | NotGiven = NOT_GIVEN,
         temperature: float | NotGiven = NOT_GIVEN,
         tool_choice: str | NotGiven = NOT_GIVEN,
@@ -220,9 +225,9 @@ async def create(
           the Realtime API.
 
           Args:
-          model: The Realtime model used for this session.
-
-          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+          input_audio_format: The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+              `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+              (mono), and little-endian byte order.
 
           input_audio_transcription: Configuration for input audio transcription, defaults to off and can be set to
               `null` to turn off once on. Input audio transcription is not native to the
@@ -249,7 +254,10 @@ async def create(
           modalities: The set of modalities the model can respond with. To disable audio, set this to
              ["text"].
 
+          model: The Realtime model used for this session.
+
           output_audio_format: The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+              For `pcm16`, output audio is sampled at a rate of 24kHz.
 
           temperature: Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
 
@@ -279,12 +287,12 @@ async def create(
             "/realtime/sessions",
             body=await async_maybe_transform(
                 {
-                    "model": model,
                     "input_audio_format": input_audio_format,
                     "input_audio_transcription": input_audio_transcription,
                     "instructions": instructions,
                     "max_response_output_tokens": max_response_output_tokens,
                     "modalities": modalities,
+                    "model": model,
                     "output_audio_format": output_audio_format,
                     "temperature": temperature,
                     "tool_choice": tool_choice,
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index e848507387..f780f6f558 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -33,7 +33,7 @@ class Messages(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> MessagesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -307,7 +307,7 @@ class AsyncMessages(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncMessagesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 0418d570ba..f32a08f235 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -64,7 +64,7 @@ def steps(self) -> Steps:
     @cached_property
     def with_raw_response(self) -> RunsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -1429,7 +1429,7 @@ def steps(self) -> AsyncSteps:
     @cached_property
     def with_raw_response(self) -> AsyncRunsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 9bd91e39e0..709c729d45 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -29,7 +29,7 @@ class Steps(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> StepsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -183,7 +183,7 @@ class AsyncSteps(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncStepsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index e45090abb0..186b6f63e2 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -72,7 +72,7 @@ def messages(self) -> Messages:
     @cached_property
     def with_raw_response(self) -> ThreadsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -906,7 +906,7 @@ def messages(self) -> AsyncMessages:
     @cached_property
     def with_raw_response(self) -> AsyncThreadsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/vector_stores/file_batches.py b/src/openai/resources/beta/vector_stores/file_batches.py
index 9f9e643bd0..6d61e92c7f 100644
--- a/src/openai/resources/beta/vector_stores/file_batches.py
+++ b/src/openai/resources/beta/vector_stores/file_batches.py
@@ -36,7 +36,7 @@ class FileBatches(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> FileBatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -365,7 +365,7 @@ class AsyncFileBatches(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncFileBatchesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/vector_stores/files.py b/src/openai/resources/beta/vector_stores/files.py
index 7c155ac917..febf27a753 100644
--- a/src/openai/resources/beta/vector_stores/files.py
+++ b/src/openai/resources/beta/vector_stores/files.py
@@ -32,7 +32,7 @@ class Files(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> FilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -344,7 +344,7 @@ class AsyncFiles(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncFilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/beta/vector_stores/vector_stores.py b/src/openai/resources/beta/vector_stores/vector_stores.py
index 61a2eadc7b..6b44c602f1 100644
--- a/src/openai/resources/beta/vector_stores/vector_stores.py
+++ b/src/openai/resources/beta/vector_stores/vector_stores.py
@@ -59,7 +59,7 @@ def file_batches(self) -> FileBatches:
     @cached_property
     def with_raw_response(self) -> VectorStoresWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -337,7 +337,7 @@ def file_batches(self) -> AsyncFileBatches:
     @cached_property
     def with_raw_response(self) -> AsyncVectorStoresWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/chat/chat.py b/src/openai/resources/chat/chat.py
index dc23a15a8e..9c4aacc953 100644
--- a/src/openai/resources/chat/chat.py
+++ b/src/openai/resources/chat/chat.py
@@ -24,7 +24,7 @@ def completions(self) -> Completions:
     @cached_property
     def with_raw_response(self) -> ChatWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -49,7 +49,7 @@ def completions(self) -> AsyncCompletions:
     @cached_property
     def with_raw_response(self) -> AsyncChatWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/chat/completions.py b/src/openai/resources/chat/completions.py
index 728c744327..a9685c507a 100644
--- a/src/openai/resources/chat/completions.py
+++ b/src/openai/resources/chat/completions.py
@@ -45,7 +45,7 @@ class Completions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> CompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -251,9 +251,6 @@ def create(
                 tier with a lower uptime SLA and no latency guarentee.
               - When not set, the default behavior is 'auto'.
 
-              When this parameter is set, the response body will include the `service_tier`
-              utilized.
-
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           store: Whether or not to store the output of this chat completion request for use in
@@ -509,9 +506,6 @@ def create(
                 tier with a lower uptime SLA and no latency guarentee.
               - When not set, the default behavior is 'auto'.
 
-              When this parameter is set, the response body will include the `service_tier`
-              utilized.
-
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           store: Whether or not to store the output of this chat completion request for use in
@@ -760,9 +754,6 @@ def create(
                 tier with a lower uptime SLA and no latency guarentee.
              - When not set, the default behavior is 'auto'.
 
-              When this parameter is set, the response body will include the `service_tier`
-              utilized.
-
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           store: Whether or not to store the output of this chat completion request for use in
@@ -906,7 +897,7 @@ class AsyncCompletions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -1112,9 +1103,6 @@ async def create(
                 tier with a lower uptime SLA and no latency guarentee.
              - When not set, the default behavior is 'auto'.
 
-              When this parameter is set, the response body will include the `service_tier`
-              utilized.
-
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           store: Whether or not to store the output of this chat completion request for use in
@@ -1370,9 +1358,6 @@ async def create(
                 tier with a lower uptime SLA and no latency guarentee.
              - When not set, the default behavior is 'auto'.
 
-              When this parameter is set, the response body will include the `service_tier`
-              utilized.
-
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           store: Whether or not to store the output of this chat completion request for use in
@@ -1621,9 +1606,6 @@ async def create(
                 tier with a lower uptime SLA and no latency guarentee.
              - When not set, the default behavior is 'auto'.
 
-              When this parameter is set, the response body will include the `service_tier`
-              utilized.
-
           stop: Up to 4 sequences where the API will stop generating further tokens.
 
           store: Whether or not to store the output of this chat completion request for use in
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py
index 1ac3575fd5..171f509352 100644
--- a/src/openai/resources/completions.py
+++ b/src/openai/resources/completions.py
@@ -32,7 +32,7 @@ class Completions(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> CompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -574,7 +574,7 @@ class AsyncCompletions(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py
index 4ab2278e89..382a42340e 100644
--- a/src/openai/resources/embeddings.py
+++ b/src/openai/resources/embeddings.py
@@ -27,7 +27,7 @@ class Embeddings(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> EmbeddingsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -68,7 +68,8 @@ def create(
               `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
               dimensions or less.
               [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-              for counting tokens.
+              for counting tokens. Some models may also impose a limit on total number of
+              tokens summed across inputs.
 
           model: ID of the model to use. You can use the
               [List models](https://platform.openai.com/docs/api-reference/models/list) API to
@@ -139,7 +140,7 @@ class AsyncEmbeddings(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -180,7 +181,8 @@ async def create(
               `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
               dimensions or less.
               [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
-              for counting tokens.
+              for counting tokens. Some models may also impose a limit on total number of
+              tokens summed across inputs.
 
           model: ID of the model to use. You can use the
               [List models](https://platform.openai.com/docs/api-reference/models/list) API to
diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py
index 6eaea1b568..af453e1e21 100644
--- a/src/openai/resources/files.py
+++ b/src/openai/resources/files.py
@@ -41,7 +41,7 @@ class Files(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> FilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -357,7 +357,7 @@ class AsyncFiles(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncFilesWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/fine_tuning/fine_tuning.py b/src/openai/resources/fine_tuning/fine_tuning.py
index d2bce87c48..eebde07d81 100644
--- a/src/openai/resources/fine_tuning/fine_tuning.py
+++ b/src/openai/resources/fine_tuning/fine_tuning.py
@@ -24,7 +24,7 @@ def jobs(self) -> Jobs:
     @cached_property
     def with_raw_response(self) -> FineTuningWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -49,7 +49,7 @@ def jobs(self) -> AsyncJobs:
     @cached_property
     def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py
index 8b5e905ea5..f86462e513 100644
--- a/src/openai/resources/fine_tuning/jobs/checkpoints.py
+++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py
@@ -25,7 +25,7 @@ class Checkpoints(SyncAPIResource):
     @cached_property
     def with_raw_response(self) -> CheckpointsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -96,7 +96,7 @@ class AsyncCheckpoints(AsyncAPIResource):
     @cached_property
     def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index 78eefc253c..e023d28fea 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -44,7 +44,7 @@ def checkpoints(self) -> Checkpoints:
     @cached_property
     def with_raw_response(self) -> JobsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
 
         For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
@@ -342,7 +342,7 @@ def checkpoints(self) -> AsyncCheckpoints:
     @cached_property
     def with_raw_response(self) -> AsyncJobsWithRawResponse:
         """
-        This property can be used as a prefix for any HTTP method call to return the 
+        This property can be used as a prefix for any HTTP method call to return the
         raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py index 2fbc077dd9..30473c14f7 100644 --- a/src/openai/resources/images.py +++ b/src/openai/resources/images.py @@ -30,7 +30,7 @@ class Images(SyncAPIResource): @cached_property def with_raw_response(self) -> ImagesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -287,7 +287,7 @@ class AsyncImages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncImagesWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py index d6062de230..a9693a6b0a 100644 --- a/src/openai/resources/models.py +++ b/src/openai/resources/models.py @@ -24,7 +24,7 @@ class Models(SyncAPIResource): @cached_property def with_raw_response(self) -> ModelsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -137,7 +137,7 @@ class AsyncModels(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModelsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py index ce80bb7d55..a8f03142bc 100644 --- a/src/openai/resources/moderations.py +++ b/src/openai/resources/moderations.py @@ -28,7 +28,7 @@ class Moderations(SyncAPIResource): @cached_property def with_raw_response(self) -> ModerationsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -98,7 +98,7 @@ class AsyncModerations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModerationsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. 
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py index d46e5ea1bb..777469ac8e 100644 --- a/src/openai/resources/uploads/parts.py +++ b/src/openai/resources/uploads/parts.py @@ -28,7 +28,7 @@ class Parts(SyncAPIResource): @cached_property def with_raw_response(self) -> PartsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -103,7 +103,7 @@ class AsyncParts(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncPartsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py index cfb500b62c..2028decef5 100644 --- a/src/openai/resources/uploads/uploads.py +++ b/src/openai/resources/uploads/uploads.py @@ -51,7 +51,7 @@ def parts(self) -> Parts: @cached_property def with_raw_response(self) -> UploadsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers @@ -344,7 +344,7 @@ def parts(self) -> AsyncParts: @cached_property def with_raw_response(self) -> AsyncUploadsWithRawResponse: """ - This property can be used as a prefix for any HTTP method call to return the + This property can be used as a prefix for any HTTP method call to return the raw response object instead of the parsed content. For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers diff --git a/src/openai/types/audio/speech_create_params.py b/src/openai/types/audio/speech_create_params.py index a60d000708..ed1a1ce748 100644 --- a/src/openai/types/audio/speech_create_params.py +++ b/src/openai/types/audio/speech_create_params.py @@ -20,11 +20,11 @@ class SpeechCreateParams(TypedDict, total=False): `tts-1` or `tts-1-hd` """ - voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]] + voice: Required[Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"]] """The voice to use when generating the audio. - Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. - Previews of the voices are available in the + Supported voices are `alloy`, `ash`, `coral`, `echo`, `fable`, `onyx`, `nova`, + `sage` and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options). 
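
With `ash`, `coral`, and `sage` joining the `voice` Literal above, here is a quick sketch of generating speech with one of the new voices. The streaming-response helper is the SDK's documented pattern for audio output; the output file name is arbitrary:

```py
from pathlib import Path

from openai import OpenAI

client = OpenAI()

# `coral` is one of the three voices added in this change.
with client.audio.speech.with_streaming_response.create(
    model="tts-1",
    voice="coral",
    input="Previews of all voices are in the text-to-speech guide.",
) as response:
    response.stream_to_file(Path("speech.mp3"))
```
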
""" diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index 7f76fed0cd..b9ea792bfa 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -43,3 +43,6 @@ from .assistant_response_format_option_param import ( AssistantResponseFormatOptionParam as AssistantResponseFormatOptionParam, ) +from .static_file_chunking_strategy_object_param import ( + StaticFileChunkingStrategyObjectParam as StaticFileChunkingStrategyObjectParam, +) diff --git a/src/openai/types/beta/file_chunking_strategy_param.py b/src/openai/types/beta/file_chunking_strategy_param.py index 46383358e5..25d94286d8 100644 --- a/src/openai/types/beta/file_chunking_strategy_param.py +++ b/src/openai/types/beta/file_chunking_strategy_param.py @@ -6,8 +6,8 @@ from typing_extensions import TypeAlias from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam -from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam +from .static_file_chunking_strategy_object_param import StaticFileChunkingStrategyObjectParam __all__ = ["FileChunkingStrategyParam"] -FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyParam] +FileChunkingStrategyParam: TypeAlias = Union[AutoFileChunkingStrategyParam, StaticFileChunkingStrategyObjectParam] diff --git a/src/openai/types/beta/realtime/conversation_item_create_event.py b/src/openai/types/beta/realtime/conversation_item_create_event.py index 50d309675b..c4f72b9aff 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event.py @@ -20,9 +20,10 @@ class ConversationItemCreateEvent(BaseModel): """Optional client-generated ID used to identify this event.""" previous_item_id: Optional[str] = None - """The ID of the preceding item after which the new item will be inserted. - - If not set, the new item will be appended to the end of the conversation. If - set, it allows an item to be inserted mid-conversation. If the ID cannot be - found, an error will be returned and the item will not be added. + """ + The ID of the preceding item after which the new item will be inserted. If not + set, the new item will be appended to the end of the conversation. If set to + `root`, the new item will be added to the beginning of the conversation. If set + to an existing ID, it allows an item to be inserted mid-conversation. If the ID + cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/conversation_item_create_event_param.py b/src/openai/types/beta/realtime/conversation_item_create_event_param.py index b8c8bbc251..6da5a63a9d 100644 --- a/src/openai/types/beta/realtime/conversation_item_create_event_param.py +++ b/src/openai/types/beta/realtime/conversation_item_create_event_param.py @@ -20,9 +20,10 @@ class ConversationItemCreateEventParam(TypedDict, total=False): """Optional client-generated ID used to identify this event.""" previous_item_id: str - """The ID of the preceding item after which the new item will be inserted. - - If not set, the new item will be appended to the end of the conversation. If - set, it allows an item to be inserted mid-conversation. If the ID cannot be - found, an error will be returned and the item will not be added. + """ + The ID of the preceding item after which the new item will be inserted. If not + set, the new item will be appended to the end of the conversation. 
If set to + `root`, the new item will be added to the beginning of the conversation. If set + to an existing ID, it allows an item to be inserted mid-conversation. If the ID + cannot be found, an error will be returned and the item will not be added. """ diff --git a/src/openai/types/beta/realtime/session.py b/src/openai/types/beta/realtime/session.py index 09cdbb02bc..2d028f817c 100644 --- a/src/openai/types/beta/realtime/session.py +++ b/src/openai/types/beta/realtime/session.py @@ -63,7 +63,12 @@ class Session(BaseModel): """Unique identifier for the session object.""" input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: Optional[InputAudioTranscription] = None """ @@ -117,7 +122,11 @@ class Session(BaseModel): """The Realtime model used for this session.""" output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: Optional[float] = None """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_create_params.py b/src/openai/types/beta/realtime/session_create_params.py index f56f2c5c22..3708efeecd 100644 --- a/src/openai/types/beta/realtime/session_create_params.py +++ b/src/openai/types/beta/realtime/session_create_params.py @@ -3,25 +3,19 @@ from __future__ import annotations from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, TypedDict __all__ = ["SessionCreateParams", "InputAudioTranscription", "Tool", "TurnDetection"] class SessionCreateParams(TypedDict, total=False): - model: Required[ - Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - ] - """The Realtime model used for this session.""" - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: InputAudioTranscription """ @@ -61,8 +55,21 @@ class SessionCreateParams(TypedDict, total=False): To disable audio, set this to ["text"]. """ + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. 
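
Since `model` is demoted from `Required` to optional in `SessionCreateParams` above, the REST helper can now be called with no arguments at all — which is exactly what the updated tests later in this diff exercise. A sketch, assuming the response's `client_secret` is populated as it is when minting ephemeral tokens:

```py
from openai import OpenAI

client = OpenAI()

# `model` may still be passed as a keyword, but a bare call is now valid too.
session = client.beta.realtime.sessions.create()
print(session.client_secret.value)  # ephemeral key for client-side connections
```
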
For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: float """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_update_event.py b/src/openai/types/beta/realtime/session_update_event.py index c04220aa25..322e588a4e 100644 --- a/src/openai/types/beta/realtime/session_update_event.py +++ b/src/openai/types/beta/realtime/session_update_event.py @@ -65,17 +65,13 @@ class SessionTurnDetection(BaseModel): class Session(BaseModel): - model: Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - """The Realtime model used for this session.""" - input_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: Optional[SessionInputAudioTranscription] = None """ @@ -115,8 +111,23 @@ class Session(BaseModel): To disable audio, set this to ["text"]. """ + model: Optional[ + Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + ] = None + """The Realtime model used for this session.""" + output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: Optional[float] = None """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/realtime/session_update_event_param.py b/src/openai/types/beta/realtime/session_update_event_param.py index aa06069b04..c01d9b6887 100644 --- a/src/openai/types/beta/realtime/session_update_event_param.py +++ b/src/openai/types/beta/realtime/session_update_event_param.py @@ -71,19 +71,13 @@ class SessionTurnDetection(TypedDict, total=False): class Session(TypedDict, total=False): - model: Required[ - Literal[ - "gpt-4o-realtime-preview", - "gpt-4o-realtime-preview-2024-10-01", - "gpt-4o-realtime-preview-2024-12-17", - "gpt-4o-mini-realtime-preview", - "gpt-4o-mini-realtime-preview-2024-12-17", - ] - ] - """The Realtime model used for this session.""" - input_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of input audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, input audio must + be 16-bit PCM at a 24kHz sample rate, single channel (mono), and little-endian + byte order. + """ input_audio_transcription: SessionInputAudioTranscription """ @@ -123,8 +117,21 @@ class Session(TypedDict, total=False): To disable audio, set this to ["text"]. 
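
The new `pcm16` details translate directly into a `session.update` over the socket. A brief sketch pinning both formats; the comments restate the constraints from the docstrings above:

```py
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI()
    async with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
        await connection.session.update(
            session={
                "modalities": ["text", "audio"],
                "input_audio_format": "pcm16",   # 16-bit PCM, 24kHz, mono, little-endian
                "output_audio_format": "pcm16",  # output audio is sampled at 24kHz
            }
        )


asyncio.run(main())
```
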
""" + model: Literal[ + "gpt-4o-realtime-preview", + "gpt-4o-realtime-preview-2024-10-01", + "gpt-4o-realtime-preview-2024-12-17", + "gpt-4o-mini-realtime-preview", + "gpt-4o-mini-realtime-preview-2024-12-17", + ] + """The Realtime model used for this session.""" + output_audio_format: Literal["pcm16", "g711_ulaw", "g711_alaw"] - """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" + """The format of output audio. + + Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For `pcm16`, output audio is + sampled at a rate of 24kHz. + """ temperature: float """Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.""" diff --git a/src/openai/types/beta/static_file_chunking_strategy_object_param.py b/src/openai/types/beta/static_file_chunking_strategy_object_param.py new file mode 100644 index 0000000000..0cdf35c0df --- /dev/null +++ b/src/openai/types/beta/static_file_chunking_strategy_object_param.py @@ -0,0 +1,16 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam + +__all__ = ["StaticFileChunkingStrategyObjectParam"] + + +class StaticFileChunkingStrategyObjectParam(TypedDict, total=False): + static: Required[StaticFileChunkingStrategyParam] + + type: Required[Literal["static"]] + """Always `static`.""" diff --git a/src/openai/types/chat/chat_completion.py b/src/openai/types/chat/chat_completion.py index 4b53e70890..cb812a2702 100644 --- a/src/openai/types/chat/chat_completion.py +++ b/src/openai/types/chat/chat_completion.py @@ -60,11 +60,7 @@ class ChatCompletion(BaseModel): """The object type, which is always `chat.completion`.""" service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request. - - This field is only included if the `service_tier` parameter is specified in the - request. - """ + """The service tier used for processing the request.""" system_fingerprint: Optional[str] = None """This fingerprint represents the backend configuration that the model runs with. diff --git a/src/openai/types/chat/chat_completion_assistant_message_param.py b/src/openai/types/chat/chat_completion_assistant_message_param.py index 35e3a3d784..229fb822f4 100644 --- a/src/openai/types/chat/chat_completion_assistant_message_param.py +++ b/src/openai/types/chat/chat_completion_assistant_message_param.py @@ -38,8 +38,8 @@ class ChatCompletionAssistantMessageParam(TypedDict, total=False): """The role of the messages author, in this case `assistant`.""" audio: Optional[Audio] - """Data about a previous audio response from the model. - + """ + Data about a previous audio response from the model. [Learn more](https://platform.openai.com/docs/guides/audio). """ diff --git a/src/openai/types/chat/chat_completion_chunk.py b/src/openai/types/chat/chat_completion_chunk.py index 9ec6dc4bdb..7b0ae2e121 100644 --- a/src/openai/types/chat/chat_completion_chunk.py +++ b/src/openai/types/chat/chat_completion_chunk.py @@ -129,11 +129,7 @@ class ChatCompletionChunk(BaseModel): """The object type, which is always `chat.completion.chunk`.""" service_tier: Optional[Literal["scale", "default"]] = None - """The service tier used for processing the request. - - This field is only included if the `service_tier` parameter is specified in the - request. 
- """ + """The service tier used for processing the request.""" system_fingerprint: Optional[str] = None """ diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index f168ddea6e..30d930b120 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -220,9 +220,6 @@ class CompletionCreateParamsBase(TypedDict, total=False): - If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarentee. - When not set, the default behavior is 'auto'. - - When this parameter is set, the response body will include the `service_tier` - utilized. """ stop: Union[Optional[str], List[str]] diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index 1385762885..a90566449b 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -19,7 +19,8 @@ class EmbeddingCreateParams(TypedDict, total=False): (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. + for counting tokens. Some models may also impose a limit on total number of + tokens summed across inputs. """ model: Required[Union[str, EmbeddingModel]] diff --git a/tests/api_resources/beta/realtime/test_sessions.py b/tests/api_resources/beta/realtime/test_sessions.py index 65bfa27572..908aa983be 100644 --- a/tests/api_resources/beta/realtime/test_sessions.py +++ b/tests/api_resources/beta/realtime/test_sessions.py @@ -19,20 +19,18 @@ class TestSessions: @parametrize def test_method_create(self, client: OpenAI) -> None: - session = client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", - ) + session = client.beta.realtime.sessions.create() assert_matches_type(SessionCreateResponse, session, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: session = client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", input_audio_format="pcm16", input_audio_transcription={"model": "model"}, instructions="instructions", max_response_output_tokens=0, modalities=["text"], + model="gpt-4o-realtime-preview", output_audio_format="pcm16", temperature=0, tool_choice="tool_choice", @@ -57,9 +55,7 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None: @parametrize def test_raw_response_create(self, client: OpenAI) -> None: - response = client.beta.realtime.sessions.with_raw_response.create( - model="gpt-4o-realtime-preview", - ) + response = client.beta.realtime.sessions.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -68,9 +64,7 @@ def test_raw_response_create(self, client: OpenAI) -> None: @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: - with client.beta.realtime.sessions.with_streaming_response.create( - model="gpt-4o-realtime-preview", - ) as response: + with client.beta.realtime.sessions.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -85,20 +79,18 @@ class TestAsyncSessions: @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: - session = await 
async_client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", - ) + session = await async_client.beta.realtime.sessions.create() assert_matches_type(SessionCreateResponse, session, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: session = await async_client.beta.realtime.sessions.create( - model="gpt-4o-realtime-preview", input_audio_format="pcm16", input_audio_transcription={"model": "model"}, instructions="instructions", max_response_output_tokens=0, modalities=["text"], + model="gpt-4o-realtime-preview", output_audio_format="pcm16", temperature=0, tool_choice="tool_choice", @@ -123,9 +115,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: - response = await async_client.beta.realtime.sessions.with_raw_response.create( - model="gpt-4o-realtime-preview", - ) + response = await async_client.beta.realtime.sessions.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -134,9 +124,7 @@ async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: - async with async_client.beta.realtime.sessions.with_streaming_response.create( - model="gpt-4o-realtime-preview", - ) as response: + async with async_client.beta.realtime.sessions.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 393a790549..25c9a36164 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -72,7 +72,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -187,7 +187,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -321,7 +321,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, @@ -436,7 +436,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn presence_penalty=-2, reasoning_effort="low", response_format={"type": "text"}, - seed=-9007199254740991, + seed=0, service_tier="auto", stop="string", store=True, diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index ad2679cabe..9ec503c1e3 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -38,7 +38,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -98,7 +98,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> 
None: max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream_options={"include_usage": True}, suffix="test.", @@ -160,7 +160,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream=False, stream_options={"include_usage": True}, @@ -220,7 +220,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn max_tokens=16, n=1, presence_penalty=-2, - seed=-9007199254740991, + seed=0, stop="\n", stream_options={"include_usage": True}, suffix="test.", diff --git a/tests/lib/test_pydantic.py b/tests/lib/test_pydantic.py index 99b9e96d21..7e128b70c0 100644 --- a/tests/lib/test_pydantic.py +++ b/tests/lib/test_pydantic.py @@ -7,6 +7,7 @@ import openai from openai._compat import PYDANTIC_V2 +from openai.lib._pydantic import to_strict_json_schema from .schema_types.query import Query @@ -235,3 +236,176 @@ def test_enums() -> None: }, } ) + + +class Star(BaseModel): + name: str = Field(description="The name of the star.") + + +class Galaxy(BaseModel): + name: str = Field(description="The name of the galaxy.") + largest_star: Star = Field(description="The largest star in the galaxy.") + + +class Universe(BaseModel): + name: str = Field(description="The name of the universe.") + galaxy: Galaxy = Field(description="A galaxy in the universe.") + + +def test_nested_inline_ref_expansion() -> None: + if PYDANTIC_V2: + assert to_strict_json_schema(Universe) == snapshot( + { + "title": "Universe", + "type": "object", + "$defs": { + "Star": { + "title": "Star", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the star.", + } + }, + "required": ["name"], + "additionalProperties": False, + }, + "Galaxy": { + "title": "Galaxy", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the galaxy.", + }, + "largest_star": { + "title": "Star", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the star.", + } + }, + "required": ["name"], + "description": "The largest star in the galaxy.", + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "additionalProperties": False, + }, + }, + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the universe.", + }, + "galaxy": { + "title": "Galaxy", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the galaxy.", + }, + "largest_star": { + "title": "Star", + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name", + "description": "The name of the star.", + } + }, + "required": ["name"], + "description": "The largest star in the galaxy.", + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "description": "A galaxy in the universe.", + "additionalProperties": False, + }, + }, + "required": ["name", "galaxy"], + "additionalProperties": False, + } + ) + else: + assert to_strict_json_schema(Universe) == snapshot( + { + "title": "Universe", + "type": "object", + "definitions": { + "Star": { + "title": "Star", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the star.", "type": "string"} + }, + "required": ["name"], + "additionalProperties": False, + }, + 
"Galaxy": { + "title": "Galaxy", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the galaxy.", "type": "string"}, + "largest_star": { + "title": "Largest Star", + "description": "The largest star in the galaxy.", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the star.", "type": "string"} + }, + "required": ["name"], + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "additionalProperties": False, + }, + }, + "properties": { + "name": { + "title": "Name", + "description": "The name of the universe.", + "type": "string", + }, + "galaxy": { + "title": "Galaxy", + "description": "A galaxy in the universe.", + "type": "object", + "properties": { + "name": { + "title": "Name", + "description": "The name of the galaxy.", + "type": "string", + }, + "largest_star": { + "title": "Largest Star", + "description": "The largest star in the galaxy.", + "type": "object", + "properties": { + "name": {"title": "Name", "description": "The name of the star.", "type": "string"} + }, + "required": ["name"], + "additionalProperties": False, + }, + }, + "required": ["name", "largest_star"], + "additionalProperties": False, + }, + }, + "required": ["name", "galaxy"], + "additionalProperties": False, + } + ) diff --git a/tests/test_client.py b/tests/test_client.py index e0d23403b1..41da2d5d04 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -6,6 +6,7 @@ import os import sys import json +import time import asyncio import inspect import subprocess @@ -1797,10 +1798,20 @@ async def test_main() -> None: [sys.executable, "-c", test_code], text=True, ) as process: - try: - process.wait(2) - if process.returncode: - raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") - except subprocess.TimeoutExpired as e: - process.kill() - raise AssertionError("calling get_platform using asyncify resulted in a hung process") from e + timeout = 10 # seconds + + start_time = time.monotonic() + while True: + return_code = process.poll() + if return_code is not None: + if return_code != 0: + raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code") + + # success + break + + if time.monotonic() - start_time > timeout: + process.kill() + raise AssertionError("calling get_platform using asyncify resulted in a hung process") + + time.sleep(0.1)