diff --git a/.genignore b/.genignore index 1186de6..1ded567 100644 --- a/.genignore +++ b/.genignore @@ -1,2 +1,3 @@ pyproject.toml -examples/* \ No newline at end of file +examples/* +src/mistralai/extra/* diff --git a/.github/workflows/lint_custom_code.yaml b/.github/workflows/lint_custom_code.yaml new file mode 100644 index 0000000..f928918 --- /dev/null +++ b/.github/workflows/lint_custom_code.yaml @@ -0,0 +1,32 @@ +name: Linting Python custom files + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + lint: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install ruff + run: pip install ruff + + - name: Lint with ruff + # No need to lint the automatically generated Speakeasy code + run: | + ruff check examples/ + ruff check src/mistralai/_hooks/ --exclude __init__.py --exclude sdkhooks.py --exclude types.py + ruff check src/mistralai/extra/ diff --git a/.github/workflows/test_custom_code.yaml b/.github/workflows/test_custom_code.yaml new file mode 100644 index 0000000..230066c --- /dev/null +++ b/.github/workflows/test_custom_code.yaml @@ -0,0 +1,48 @@ +name: Testing Python custom files + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Python + id: setup-python + uses: actions/setup-python@v4 + with: + python-version: '3.12' + + - name: Install Poetry + uses: snok/install-poetry@76e04a911780d5b312d89783f7b1cd627778900a # v1.4.1 + with: + virtualenvs-create: true + virtualenvs-in-project: true + virtualenvs-path: .venv + installer-parallel: true + + - name: Load cached venv + id: cached-poetry-dependencies + uses: actions/cache@v4 + with: + path: .venv + key: venv-${{ runner.os }}-${{ steps.setup-python.outputs.python-version }}-${{ hashFiles('**/poetry.lock') }} + + - name: Install dependencies + # Install dependencies if cache does not exist + if: steps.cached-poetry-dependencies.outputs.cache-hit != 'true' + run: poetry install --no-interaction --no-root + + - name: Run the 'src/mistralai/extra' package unit tests + run: | + source .venv/bin/activate + python3.12 -m unittest discover -s src/mistralai/extra/tests -t src diff --git a/.speakeasy/gen.lock b/.speakeasy/gen.lock index 722fd29..ac3e011 100644 --- a/.speakeasy/gen.lock +++ b/.speakeasy/gen.lock @@ -1,12 +1,12 @@ lockVersion: 2.0.0 id: 2d045ec7-2ebb-4f4d-ad25-40953b132161 management: - docChecksum: dbfa566129ede53f4e3b2c91e81f6f74 + docChecksum: 553c31591e8dc33a58cb75f348c3aa72 docVersion: 0.0.2 - speakeasyVersion: 1.469.11 - generationVersion: 2.493.32 - releaseVersion: 1.4.0 - configChecksum: 46cde4e28fb5efba97051b54ac2e1c83 + speakeasyVersion: 1.477.0 + generationVersion: 2.497.0 + releaseVersion: 1.5.0 + configChecksum: 9a5649c5c372dc5fd2fde38a0faee40e repoURL: https://github.com/mistralai/client-python.git installationURL: https://github.com/mistralai/client-python.git published: true @@ -14,7 +14,8 @@ features: python: additionalDependencies: 1.0.0 constsAndDefaults: 1.0.5 - core: 5.10.4 + core: 5.10.5 + customCodeRegions: 0.1.1 defaultEnabledRetries: 0.2.0 downloadStreams: 1.0.1 enumUnions: 0.1.0 @@ -155,6 +156,7 @@ generatedFiles: - docs/models/jobsapiroutesfinetuningupdatefinetunedmodelrequest.md - docs/models/jobsout.md - docs/models/jobsoutobject.md + - docs/models/jsonschema.md - 
docs/models/legacyjobmetadataout.md - docs/models/legacyjobmetadataoutobject.md - docs/models/listfilesout.md @@ -319,6 +321,7 @@ generatedFiles: - src/mistralai/models/jobs_api_routes_fine_tuning_unarchive_fine_tuned_modelop.py - src/mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py - src/mistralai/models/jobsout.py + - src/mistralai/models/jsonschema.py - src/mistralai/models/legacyjobmetadataout.py - src/mistralai/models/listfilesout.py - src/mistralai/models/metricout.py @@ -566,6 +569,7 @@ examples: responses: "422": application/json: {} + "200": {} fim_completion_v1_fim_completions_post: speakeasy-default-fim-completion-v1-fim-completions-post: requestBody: @@ -582,6 +586,7 @@ examples: responses: "422": application/json: {} + "200": {} agents_completion_v1_agents_completions_post: speakeasy-default-agents-completion-v1-agents-completions-post: requestBody: @@ -598,6 +603,7 @@ examples: responses: "422": application/json: {} + "200": {} embeddings_v1_embeddings_post: speakeasy-default-embeddings-v1-embeddings-post: requestBody: diff --git a/.speakeasy/gen.yaml b/.speakeasy/gen.yaml index 30a1083..069ce07 100644 --- a/.speakeasy/gen.yaml +++ b/.speakeasy/gen.yaml @@ -13,7 +13,7 @@ generation: oAuth2ClientCredentialsEnabled: true oAuth2PasswordEnabled: false python: - version: 1.4.0 + version: 1.5.0 additionalDependencies: dev: pytest: ^8.2.2 @@ -23,7 +23,7 @@ python: clientServerStatusCodesAsErrors: true defaultErrorName: SDKError description: Python Client SDK for the Mistral AI API. - enableCustomCodeRegions: false + enableCustomCodeRegions: true enumFormat: union envVarPrefix: MISTRAL fixFlags: diff --git a/.speakeasy/workflow.lock b/.speakeasy/workflow.lock index 36a7452..ea74f7d 100644 --- a/.speakeasy/workflow.lock +++ b/.speakeasy/workflow.lock @@ -1,4 +1,4 @@ -speakeasyVersion: 1.469.11 +speakeasyVersion: 1.477.0 sources: mistral-azure-source: sourceNamespace: mistral-azure-source @@ -14,11 +14,10 @@ sources: - latest mistral-openapi: sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:c414dd5eecca5f02fe9012a1d131f696e0257fe100c371609272dbc6c522ef07 - sourceBlobDigest: sha256:f48af039106d00de84345fd095fbf4831f18fbeeef07e9ff7bba70a0e07eda07 + sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d + sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c tags: - latest - - speakeasy-sdk-regen-1737393201 targets: mistralai-azure-sdk: source: mistral-azure-source @@ -37,10 +36,10 @@ targets: mistralai-sdk: source: mistral-openapi sourceNamespace: mistral-openapi - sourceRevisionDigest: sha256:c414dd5eecca5f02fe9012a1d131f696e0257fe100c371609272dbc6c522ef07 - sourceBlobDigest: sha256:f48af039106d00de84345fd095fbf4831f18fbeeef07e9ff7bba70a0e07eda07 + sourceRevisionDigest: sha256:af4a2854e017abc0ec9e4b557186611dcd69468d82d5ac7f81bfbe49165fc18d + sourceBlobDigest: sha256:9f1bbc418fba3c7b5031bacdf9d431aff476fb4b2aa3838ed50fb3922563703c codeSamplesNamespace: mistral-openapi-code-samples - codeSamplesRevisionDigest: sha256:3f61d33c46733b24ecd422423900425b381529da038992e59bdb5a9b766bdf89 + codeSamplesRevisionDigest: sha256:cbf9b277d16c47816fc5d63b4c69cf0fbd1fe99d424c34ab465d2b61fcc6e5e8 workflow: workflowVersion: 1.0.0 speakeasyVersion: latest diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 8d79f0a..0000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "python.testing.pytestArgs": ["tests", "-vv"], - 
"python.testing.unittestEnabled": false, - "python.testing.pytestEnabled": true, - "pylint.args": ["--rcfile=pylintrc"] -} diff --git a/README.md b/README.md index dc49259..617c607 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,7 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo * [Server Selection](#server-selection) * [Custom HTTP Client](#custom-http-client) * [Authentication](#authentication) + * [Resource Management](#resource-management) * [Debugging](#debugging) * [IDE Support](#ide-support) * [Development](#development) @@ -52,6 +53,11 @@ Mistral AI API: Our Chat Completion and Embeddings APIs specification. Create yo ## SDK Installation +> [!NOTE] +> **Python version upgrade policy** +> +> Once a Python version reaches its [official end of life date](https://devguide.python.org/versions/), a 3-month grace period is provided for users to upgrade. Following this grace period, the minimum python version supported in the SDK will be updated. + The SDK can be installed with either *pip* or *poetry* package managers. ### PIP @@ -754,6 +760,32 @@ with Mistral( ``` + +## Resource Management + +The `Mistral` class implements the context manager protocol and registers a finalizer function to close the underlying sync and async HTTPX clients it uses under the hood. This will close HTTP connections, release memory and free up other resources held by the SDK. In short-lived Python programs and notebooks that make a few SDK method calls, resource management may not be a concern. However, in longer-lived programs, it is beneficial to create a single SDK instance via a [context manager][context-manager] and reuse it across the application. + +[context-manager]: https://docs.python.org/3/reference/datamodel.html#context-managers + +```python +from mistralai import Mistral +import os +def main(): + with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + # Rest of application here... + + +# Or when using async: +async def amain(): + async with Mistral( + api_key=os.getenv("MISTRAL_API_KEY", ""), + ) as mistral: + # Rest of application here... +``` + + ## Debugging diff --git a/RELEASES.md b/RELEASES.md index f441230..cc8c6c2 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -148,4 +148,14 @@ Based on: ### Generated - [python v1.4.0] . ### Releases -- [PyPI v1.4.0] https://pypi.org/project/mistralai/1.4.0 - . \ No newline at end of file +- [PyPI v1.4.0] https://pypi.org/project/mistralai/1.4.0 - . + +## 2025-01-27 13:57:39 +### Changes +Based on: +- OpenAPI Doc +- Speakeasy CLI 1.476.2 (2.495.1) https://github.com/speakeasy-api/speakeasy +### Generated +- [python v1.5.0] . +### Releases +- [PyPI v1.5.0] https://pypi.org/project/mistralai/1.5.0 - . 
\ No newline at end of file diff --git a/docs/models/jsonschema.md b/docs/models/jsonschema.md new file mode 100644 index 0000000..ae38786 --- /dev/null +++ b/docs/models/jsonschema.md @@ -0,0 +1,11 @@ +# JSONSchema + + +## Fields + +| Field | Type | Required | Description | +| ----------------------- | ----------------------- | ----------------------- | ----------------------- | +| `name` | *str* | :heavy_check_mark: | N/A | +| `schema_definition` | Dict[str, *Any*] | :heavy_check_mark: | N/A | +| `description` | *OptionalNullable[str]* | :heavy_minus_sign: | N/A | +| `strict` | *Optional[bool]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformat.md b/docs/models/responseformat.md index 9c627f5..23a1641 100644 --- a/docs/models/responseformat.md +++ b/docs/models/responseformat.md @@ -5,4 +5,5 @@ | Field | Type | Required | Description | | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | \ No newline at end of file +| `type` | [Optional[models.ResponseFormats]](../models/responseformats.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message. | +| `json_schema` | [OptionalNullable[models.JSONSchema]](../models/jsonschema.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/responseformats.md b/docs/models/responseformats.md index ce35fbb..06886af 100644 --- a/docs/models/responseformats.md +++ b/docs/models/responseformats.md @@ -8,4 +8,5 @@ An object specifying the format that the model must output. 
Setting to `{ "type" | Name | Value | | ------------- | ------------- | | `TEXT` | text | -| `JSON_OBJECT` | json_object | \ No newline at end of file +| `JSON_OBJECT` | json_object | +| `JSON_SCHEMA` | json_schema | \ No newline at end of file diff --git a/examples/async_agents_no_streaming.py b/examples/async_agents_no_streaming.py index 799333b..45f300a 100755 --- a/examples/async_agents_no_streaming.py +++ b/examples/async_agents_no_streaming.py @@ -9,11 +9,12 @@ async def main(): api_key = os.environ["MISTRAL_API_KEY"] + agent_id = os.environ["MISTRAL_AGENT_ID"] client = Mistral(api_key=api_key) chat_response = await client.agents.complete_async( - agent_id="", + agent_id=agent_id, messages=[UserMessage(content="What is the best French cheese?")], ) diff --git a/examples/async_chat_with_image_no_streaming.py b/examples/async_chat_with_image_no_streaming.py index 22f9adc..7e41530 100755 --- a/examples/async_chat_with_image_no_streaming.py +++ b/examples/async_chat_with_image_no_streaming.py @@ -3,10 +3,9 @@ import asyncio import os -import httpx from mistralai import Mistral -from mistralai.models import ImageURLChunk, TextChunk, UserMessage +from mistralai.models import UserMessage async def main(): diff --git a/examples/async_structured_outputs.py b/examples/async_structured_outputs.py new file mode 100644 index 0000000..4fafc99 --- /dev/null +++ b/examples/async_structured_outputs.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python + +import asyncio +import os +from pydantic import BaseModel + +from mistralai import Mistral + +async def main(): + + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + chat_response = await client.chat.parse_async( + model="mistral-large-2411", + messages=[ + {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. 
For each step, just provide the output as an equation; use the explanation field to detail the reasoning."}, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration + ) + print(chat_response.choices[0].message.parsed) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/chat_with_streaming.py b/examples/chat_with_streaming.py index 5fc7503..19d48a1 100755 --- a/examples/chat_with_streaming.py +++ b/examples/chat_with_streaming.py @@ -8,7 +8,7 @@ def main(): api_key = os.environ["MISTRAL_API_KEY"] - model = "mistral-tiny" + model = "mistral-large-latest" client = Mistral(api_key=api_key) @@ -17,7 +17,7 @@ def main(): messages=[UserMessage(content="What is the best French cheese?")], ): - print(chunk.data.choices[0].delta.content) + print(chunk.data.choices[0].delta.content, end="") if __name__ == "__main__": diff --git a/examples/function_calling.py b/examples/function_calling.py index 766a825..e7eba59 100644 --- a/examples/function_calling.py +++ b/examples/function_calling.py @@ -117,6 +117,8 @@ def retrieve_payment_date(data: Dict[str, List], transaction_id: str) -> str: tool_call_id=tool_call.id, ) ) +print(messages) + response = client.chat.complete(model=model, messages=messages, tools=tools) print(f"{response.choices[0].message.content}") diff --git a/examples/structured_outputs.py b/examples/structured_outputs.py new file mode 100644 index 0000000..15dc1bf --- /dev/null +++ b/examples/structured_outputs.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +import os +from pydantic import BaseModel + +from mistralai import Mistral + +def main(): + api_key = os.environ["MISTRAL_API_KEY"] + client = Mistral(api_key=api_key) + + class Explanation(BaseModel): + explanation: str + output: str + + class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + print("Using the .parse method to parse the response into a Pydantic model:\n") + chat_response = client.chat.parse( + model="mistral-large-latest", + messages=[ + {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation; use the explanation field to detail the reasoning."}, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration + ) + print(chat_response.choices[0].message.parsed) + + # Or with the streaming API + print("\nUsing the .parse_stream method to stream back the response as JSON following the schema:\n") + with client.chat.parse_stream( + model="mistral-large-latest", + messages=[ + {"role": "system", "content": "You are a helpful math tutor. You will be provided with a math problem, and your goal will be to output a step by step solution, along with a final answer. For each step, just provide the output as an equation; use the explanation field to detail the reasoning."}, + {"role": "user", "content": "How can I solve 8x + 7 = -23"}, + ], + response_format=MathDemonstration + ) as stream: + for chunk in stream: + print(chunk.data.choices[0].delta.content, end="") + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 79ebece..42f36f1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "mistralai" -version = "1.4.0" +version = "1.5.0" description = "Python Client SDK for the Mistral AI API." 
authors = ["Mistral"] readme = "README-PYPI.md" diff --git a/src/mistralai/_version.py b/src/mistralai/_version.py index b39db76..7f36cf1 100644 --- a/src/mistralai/_version.py +++ b/src/mistralai/_version.py @@ -3,10 +3,10 @@ import importlib.metadata __title__: str = "mistralai" -__version__: str = "1.4.0" +__version__: str = "1.5.0" __openapi_doc_version__: str = "0.0.2" -__gen_version__: str = "2.493.32" -__user_agent__: str = "speakeasy-sdk/python 1.4.0 2.493.32 0.0.2 mistralai" +__gen_version__: str = "2.497.0" +__user_agent__: str = "speakeasy-sdk/python 1.5.0 2.497.0 0.0.2 mistralai" try: if __package__ is not None: diff --git a/src/mistralai/chat.py b/src/mistralai/chat.py index 547cc04..55ad60a 100644 --- a/src/mistralai/chat.py +++ b/src/mistralai/chat.py @@ -7,10 +7,92 @@ from mistralai.utils import eventstreaming, get_security_from_env from typing import Any, List, Mapping, Optional, Union +# region imports +from typing import Type +from mistralai.extra import ( + convert_to_parsed_chat_completion_response, + response_format_from_pydantic_model, + CustomPydanticModel, + ParsedChatCompletionResponse, +) +# endregion imports + class Chat(BaseSDK): r"""Chat Completion API.""" + # region sdk-class-body + # Custom .parse methods for the Structure Outputs Feature. + + def parse( + self, response_format: Type[CustomPydanticModel], **kwargs: Any + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Parse the response using the provided response format. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + # Convert the input Pydantic Model to a strict JSON ready to be passed to chat.complete + json_response_format = response_format_from_pydantic_model(response_format) + # Run the inference + response = self.complete(**kwargs, response_format=json_response_format) + # Parse response back to the input pydantic model + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + async def parse_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> ParsedChatCompletionResponse[CustomPydanticModel]: + """ + Asynchronously parse the response using the provided response format. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs Additional keyword arguments to pass to the .complete method + :return: The parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.complete_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + parsed_response = convert_to_parsed_chat_completion_response( + response, response_format + ) + return parsed_response + + def parse_stream( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStream[models.CompletionEvent]: + """ + Parse the response using the provided response format. + For now the response will be in JSON format not in the input Pydantic model. 
:param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs: Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = self.stream(**kwargs, response_format=json_response_format) + return response + + async def parse_stream_async( + self, response_format: Type[CustomPydanticModel], **kwargs + ) -> eventstreaming.EventStreamAsync[models.CompletionEvent]: + """ + Asynchronously parse the response using the provided response format. + For now the response will be in JSON format, not parsed into the input Pydantic model. + :param Type[CustomPydanticModel] response_format: The Pydantic model to parse the response into + :param Any **kwargs: Additional keyword arguments to pass to the .stream method + :return: The JSON parsed response + """ + json_response_format = response_format_from_pydantic_model(response_format) + response = await self.stream_async( # pylint: disable=E1125 + **kwargs, response_format=json_response_format + ) + return response + + # endregion sdk-class-body + def complete( self, *, diff --git a/src/mistralai/extra/README.md b/src/mistralai/extra/README.md new file mode 100644 index 0000000..dfce43b --- /dev/null +++ b/src/mistralai/extra/README.md @@ -0,0 +1,56 @@ +## Context + +The extra package contains the custom logic that is too complex to be generated by Speakeasy from the OpenAPI specs. It was introduced to add the Structured Outputs feature. + +## Development / Contributing + +To add custom code to the SDK, you need to use [Speakeasy custom code regions](https://www.speakeasy.com/docs/customize/code/code-regions/overview) as below. + +### Runbook of SDK customization + +1. Add the code you want to import in the `src/mistralai/extra/` package. To have it importable from the SDK, you need to add it to the `__init__.py` file: +```python +from .my_custom_file import my_custom_function + +__all__ = ["my_custom_function"] +``` + +2. Add a new custom code region in the SDK files, e.g. in `src/mistralai/chat.py`: +```python +# region imports +from typing import Type +from mistralai.extra import my_custom_function +# endregion imports + +class Chat(BaseSDK): + r"""Chat Completion API.""" + + # region sdk-class-body + def my_custom_method(self, param: str) -> Type[some_type]: + output = my_custom_function(param) + return output + # endregion sdk-class-body +``` + +3. Now build the SDK with the custom code: +```bash +rm -rf dist; poetry build; python3 -m pip install ~/client-python/dist/mistralai-1.5.0-py3-none-any.whl --force-reinstall +``` + +4. 
And now you should be able to call the custom method: +```python +import os +from mistralai import Mistral + +api_key = os.environ["MISTRAL_API_KEY"] +client = Mistral(api_key=api_key) + +client.chat.my_custom_method(param="test") +``` + +### Run the unit tests + +To run the unit tests for the `extra` package, you can run the following command from the root of the repository: +```bash +python3.12 -m unittest discover -s src/mistralai/extra/tests -t src +``` diff --git a/src/mistralai/extra/__init__.py b/src/mistralai/extra/__init__.py new file mode 100644 index 0000000..d8f7a21 --- /dev/null +++ b/src/mistralai/extra/__init__.py @@ -0,0 +1,5 @@ +from .struct_chat import ParsedChatCompletionResponse, convert_to_parsed_chat_completion_response +from .utils import response_format_from_pydantic_model +from .utils.response_format import CustomPydanticModel + +__all__ = ["convert_to_parsed_chat_completion_response", "response_format_from_pydantic_model", "CustomPydanticModel", "ParsedChatCompletionResponse"] diff --git a/src/mistralai/extra/struct_chat.py b/src/mistralai/extra/struct_chat.py new file mode 100644 index 0000000..364b450 --- /dev/null +++ b/src/mistralai/extra/struct_chat.py @@ -0,0 +1,41 @@ +from ..models import ChatCompletionResponse, ChatCompletionChoice, AssistantMessage +from .utils.response_format import CustomPydanticModel, pydantic_model_from_json +from typing import List, Optional, Type, Generic +from pydantic import BaseModel +import json + +class ParsedAssistantMessage(AssistantMessage, Generic[CustomPydanticModel]): + parsed: Optional[CustomPydanticModel] + +class ParsedChatCompletionChoice(ChatCompletionChoice, Generic[CustomPydanticModel]): + message: Optional[ParsedAssistantMessage[CustomPydanticModel]] # type: ignore + +class ParsedChatCompletionResponse(ChatCompletionResponse, Generic[CustomPydanticModel]): + choices: Optional[List[ParsedChatCompletionChoice[CustomPydanticModel]]] # type: ignore + +def convert_to_parsed_chat_completion_response(response: ChatCompletionResponse, response_format: Type[BaseModel]) -> ParsedChatCompletionResponse: + parsed_choices = [] + + if response.choices: + for choice in response.choices: + if choice.message: + parsed_message: ParsedAssistantMessage = ParsedAssistantMessage( + **choice.message.model_dump(), + parsed=None + ) + if isinstance(parsed_message.content, str): + parsed_message.parsed = pydantic_model_from_json(json.loads(parsed_message.content), response_format) + elif parsed_message.content is None: + parsed_message.parsed = None + else: + raise TypeError(f"Unexpected type for message.content: {type(parsed_message.content)}") + choice_dict = choice.model_dump() + choice_dict["message"] = parsed_message + parsed_choice: ParsedChatCompletionChoice = ParsedChatCompletionChoice(**choice_dict) + parsed_choices.append(parsed_choice) + else: + parsed_choice = ParsedChatCompletionChoice(**choice.model_dump()) + parsed_choices.append(parsed_choice) + response_dict = response.model_dump() + response_dict["choices"] = parsed_choices + return ParsedChatCompletionResponse(**response_dict) diff --git a/src/mistralai/extra/tests/__init__.py b/src/mistralai/extra/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/mistralai/extra/tests/test_struct_chat.py b/src/mistralai/extra/tests/test_struct_chat.py new file mode 100644 index 0000000..fd93575 --- /dev/null +++ b/src/mistralai/extra/tests/test_struct_chat.py @@ -0,0 +1,103 @@ +import unittest +from ..struct_chat import ( + 
convert_to_parsed_chat_completion_response, + ParsedChatCompletionResponse, + ParsedChatCompletionChoice, + ParsedAssistantMessage, +) +from ...models import ( + ChatCompletionResponse, + UsageInfo, + ChatCompletionChoice, + AssistantMessage, +) +from pydantic import BaseModel + + +class Explanation(BaseModel): + explanation: str + output: str + + +class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + +mock_cc_response = ChatCompletionResponse( + id="c0271b2098954c6094231703875ca0bc", + object="chat.completion", + model="mistral-large-latest", + usage=UsageInfo(prompt_tokens=75, completion_tokens=220, total_tokens=295), + created=1737727558, + choices=[ + ChatCompletionChoice( + index=0, + message=AssistantMessage( + content='{\n "final_answer": "x = -4",\n "steps": [\n {\n "explanation": "Start with the given equation.",\n "output": "8x + 7 = -23"\n },\n {\n "explanation": "Subtract 7 from both sides to isolate the term with x.",\n "output": "8x = -23 - 7"\n },\n {\n "explanation": "Simplify the right side of the equation.",\n "output": "8x = -30"\n },\n {\n "explanation": "Divide both sides by 8 to solve for x.",\n "output": "x = -30 / 8"\n },\n {\n "explanation": "Simplify the fraction to get the final answer.",\n "output": "x = -4"\n }\n ]\n}', + tool_calls=None, + prefix=False, + role="assistant", + ), + finish_reason="stop", + ) + ], +) + + +expected_response = ParsedChatCompletionResponse( + choices=[ + ParsedChatCompletionChoice( + index=0, + message=ParsedAssistantMessage( + content='{\n "final_answer": "x = -4",\n "steps": [\n {\n "explanation": "Start with the given equation.",\n "output": "8x + 7 = -23"\n },\n {\n "explanation": "Subtract 7 from both sides to isolate the term with x.",\n "output": "8x = -23 - 7"\n },\n {\n "explanation": "Simplify the right side of the equation.",\n "output": "8x = -30"\n },\n {\n "explanation": "Divide both sides by 8 to solve for x.",\n "output": "x = -30 / 8"\n },\n {\n "explanation": "Simplify the fraction to get the final answer.",\n "output": "x = -4"\n }\n ]\n}', + tool_calls=None, + prefix=False, + role="assistant", + parsed=MathDemonstration( + steps=[ + Explanation( + explanation="Start with the given equation.", + output="8x + 7 = -23", + ), + Explanation( + explanation="Subtract 7 from both sides to isolate the term with x.", + output="8x = -23 - 7", + ), + Explanation( + explanation="Simplify the right side of the equation.", + output="8x = -30", + ), + Explanation( + explanation="Divide both sides by 8 to solve for x.", + output="x = -30 / 8", + ), + Explanation( + explanation="Simplify the fraction to get the final answer.", + output="x = -4", + ), + ], + final_answer="x = -4", + ), + ), + finish_reason="stop", + ) + ], + created=1737727558, + id="c0271b2098954c6094231703875ca0bc", + model="mistral-large-latest", + object="chat.completion", + usage=UsageInfo(prompt_tokens=75, completion_tokens=220, total_tokens=295), +) + + +class TestConvertToParsedChatCompletionResponse(unittest.TestCase): + def test_convert_to_parsed_chat_completion_response(self): + output = convert_to_parsed_chat_completion_response( + mock_cc_response, MathDemonstration + ) + self.assertEqual(output, expected_response) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/mistralai/extra/tests/test_utils.py b/src/mistralai/extra/tests/test_utils.py new file mode 100644 index 0000000..41fa53e --- /dev/null +++ b/src/mistralai/extra/tests/test_utils.py @@ -0,0 +1,162 @@ +from ..utils.response_format import ( + 
pydantic_model_from_json, + response_format_from_pydantic_model, + rec_strict_json_schema, +) +from pydantic import BaseModel, ValidationError + +from ...models import ResponseFormat, JSONSchema +from ...types.basemodel import Unset + +import unittest + + +class Student(BaseModel): + name: str + age: int + + +class Explanation(BaseModel): + explanation: str + output: str + + +class MathDemonstration(BaseModel): + steps: list[Explanation] + final_answer: str + + +mathdemo_schema = { + "$defs": { + "Explanation": { + "properties": { + "explanation": {"title": "Explanation", "type": "string"}, + "output": {"title": "Output", "type": "string"}, + }, + "required": ["explanation", "output"], + "title": "Explanation", + "type": "object", + } + }, + "properties": { + "steps": { + "items": {"$ref": "#/$defs/Explanation"}, + "title": "Steps", + "type": "array", + }, + "final_answer": {"title": "Final Answer", "type": "string"}, + }, + "required": ["steps", "final_answer"], + "title": "MathDemonstration", + "type": "object", +} + +mathdemo_strict_schema = mathdemo_schema.copy() +mathdemo_strict_schema["$defs"]["Explanation"]["additionalProperties"] = False # type: ignore +mathdemo_strict_schema["additionalProperties"] = False + +mathdemo_response_format = ResponseFormat( + type="json_schema", + json_schema=JSONSchema( + name="MathDemonstration", + schema_definition=mathdemo_strict_schema, + description=Unset(), + strict=True, + ), +) + + +class TestResponseFormat(unittest.TestCase): + def test_pydantic_model_from_json(self): + missing_json_data = {"name": "Jean Dupont"} + good_json_data = {"name": "Jean Dupont", "age": 25} + extra_json_data = { + "name": "Jean Dupont", + "age": 25, + "extra_field": "extra_value", + } + complex_json_data = { + "final_answer": "x = -4", + "steps": [ + { + "explanation": "Start with the given equation.", + "output": "8x + 7 = -23", + }, + { + "explanation": "Subtract 7 from both sides to isolate the term with x.", + "output": "8x = -23 - 7", + }, + { + "explanation": "Simplify the right side of the equation.", + "output": "8x = -30", + }, + { + "explanation": "Divide both sides by 8 to solve for x.", + "output": "x = -30 / 8", + }, + { + "explanation": "Simplify the fraction to get the final answer.", + "output": "x = -4", + }, + ], + } + + self.assertEqual( + pydantic_model_from_json(good_json_data, Student), + Student(name="Jean Dupont", age=25), + ) + self.assertEqual( + pydantic_model_from_json(extra_json_data, Student), + Student(name="Jean Dupont", age=25), + ) + self.assertEqual( + pydantic_model_from_json(complex_json_data, MathDemonstration), + MathDemonstration( + steps=[ + Explanation( + explanation="Start with the given equation.", + output="8x + 7 = -23", + ), + Explanation( + explanation="Subtract 7 from both sides to isolate the term with x.", + output="8x = -23 - 7", + ), + Explanation( + explanation="Simplify the right side of the equation.", + output="8x = -30", + ), + Explanation( + explanation="Divide both sides by 8 to solve for x.", + output="x = -30 / 8", + ), + Explanation( + explanation="Simplify the fraction to get the final answer.", + output="x = -4", + ), + ], + final_answer="x = -4", + ), + ) + + # Check it raises a validation error + with self.assertRaises(ValidationError): + pydantic_model_from_json(missing_json_data, Student) # type: ignore + + def test_response_format_from_pydantic_model(self): + self.assertEqual( + response_format_from_pydantic_model(MathDemonstration), + mathdemo_response_format, + ) + + def 
test_rec_strict_json_schema(self): + invalid_schema = mathdemo_schema | {"wrong_value": 1} + self.assertEqual( + rec_strict_json_schema(mathdemo_schema), mathdemo_strict_schema + ) + + with self.assertRaises(ValueError): + rec_strict_json_schema(invalid_schema) + + +if __name__ == "__main__": + unittest.main() diff --git a/src/mistralai/extra/utils/__init__.py b/src/mistralai/extra/utils/__init__.py new file mode 100644 index 0000000..5011f1a --- /dev/null +++ b/src/mistralai/extra/utils/__init__.py @@ -0,0 +1,3 @@ +from .response_format import response_format_from_pydantic_model + +__all__ = ["response_format_from_pydantic_model"] diff --git a/src/mistralai/extra/utils/_pydantic_helper.py b/src/mistralai/extra/utils/_pydantic_helper.py new file mode 100644 index 0000000..08523f4 --- /dev/null +++ b/src/mistralai/extra/utils/_pydantic_helper.py @@ -0,0 +1,20 @@ +from typing import Any + +def rec_strict_json_schema(schema_node: Any) -> Any: + """ + Recursively set the additionalProperties property to False for all objects in the JSON Schema. + This makes the JSON Schema strict (i.e. no additional properties are allowed). + """ + if isinstance(schema_node, (str, bool)): + return schema_node + if isinstance(schema_node, dict): + if "type" in schema_node and schema_node["type"] == "object": + schema_node["additionalProperties"] = False + for key, value in schema_node.items(): + schema_node[key] = rec_strict_json_schema(value) + elif isinstance(schema_node, list): + for i, value in enumerate(schema_node): + schema_node[i] = rec_strict_json_schema(value) + else: + raise ValueError(f"Unexpected type: {schema_node}") + return schema_node diff --git a/src/mistralai/extra/utils/response_format.py b/src/mistralai/extra/utils/response_format.py new file mode 100644 index 0000000..f9ded3f --- /dev/null +++ b/src/mistralai/extra/utils/response_format.py @@ -0,0 +1,24 @@ +from pydantic import BaseModel +from typing import TypeVar, Any, Type +from ...models import JSONSchema, ResponseFormat +from ._pydantic_helper import rec_strict_json_schema + +CustomPydanticModel = TypeVar("CustomPydanticModel", bound=BaseModel) + + +def response_format_from_pydantic_model( + model: type[CustomPydanticModel], +) -> ResponseFormat: + """Build a json_schema response format with a strict JSON schema from a Pydantic model.""" + model_schema = rec_strict_json_schema(model.model_json_schema()) + json_schema = JSONSchema.model_validate( + {"name": model.__name__, "schema": model_schema, "strict": True} + ) + return ResponseFormat(type="json_schema", json_schema=json_schema) + + +def pydantic_model_from_json( + json_data: dict[str, Any], pydantic_model: Type[CustomPydanticModel] +) -> CustomPydanticModel: + """Parse JSON data into a Pydantic model.""" + return pydantic_model.model_validate(json_data) diff --git a/src/mistralai/httpclient.py b/src/mistralai/httpclient.py index 167cea4..9dc43cb 100644 --- a/src/mistralai/httpclient.py +++ b/src/mistralai/httpclient.py @@ -1,6 +1,8 @@ """Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" # pyright: reportReturnType = false +import asyncio +from concurrent.futures import ThreadPoolExecutor from typing_extensions import Protocol, runtime_checkable import httpx from typing import Any, Optional, Union @@ -82,3 +84,51 @@ def build_request( async def aclose(self) -> None: pass + + +class ClientOwner(Protocol): + client: Union[HttpClient, None] + async_client: Union[AsyncHttpClient, None] + + +def close_clients( + owner: ClientOwner, + sync_client: Union[HttpClient, None], + async_client: Union[AsyncHttpClient, None], +) -> None: + """ + A finalizer function that is meant to be used with weakref.finalize to close + httpx clients used by an SDK so that underlying resources can be garbage + collected. + """ + + # Unset the client/async_client properties so there are no more references + # to them from the owning SDK instance and they can be reaped. + owner.client = None + owner.async_client = None + + if sync_client is not None: + try: + sync_client.close() + except Exception: + pass + + if async_client is not None: + is_async = False + try: + asyncio.get_running_loop() + is_async = True + except RuntimeError: + pass + + try: + # If this function is called in an async loop then start another + # loop in a separate thread to close the async http client. + if is_async: + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(asyncio.run, async_client.aclose()) + future.result() + else: + asyncio.run(async_client.aclose()) + except Exception: + pass diff --git a/src/mistralai/models/__init__.py b/src/mistralai/models/__init__.py index 608edba..ee083f3 100644 --- a/src/mistralai/models/__init__.py +++ b/src/mistralai/models/__init__.py @@ -264,6 +264,7 @@ JobsAPIRoutesFineTuningUpdateFineTunedModelRequestTypedDict, ) from .jobsout import JobsOut, JobsOutObject, JobsOutTypedDict +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .legacyjobmetadataout import ( LegacyJobMetadataOut, LegacyJobMetadataOutObject, @@ -515,6 +516,8 @@ "InputsTypedDict", "Integrations", "IntegrationsTypedDict", + "JSONSchema", + "JSONSchemaTypedDict", "JobIn", "JobInIntegrations", "JobInIntegrationsTypedDict", diff --git a/src/mistralai/models/jsonschema.py b/src/mistralai/models/jsonschema.py new file mode 100644 index 0000000..76e4033 --- /dev/null +++ b/src/mistralai/models/jsonschema.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasy.com). 
DO NOT EDIT.""" + +from __future__ import annotations +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +import pydantic +from pydantic import model_serializer +from typing import Any, Dict, Optional +from typing_extensions import Annotated, NotRequired, TypedDict + + +class JSONSchemaTypedDict(TypedDict): + name: str + schema_definition: Dict[str, Any] + description: NotRequired[Nullable[str]] + strict: NotRequired[bool] + + +class JSONSchema(BaseModel): + name: str + + schema_definition: Annotated[Dict[str, Any], pydantic.Field(alias="schema")] + + description: OptionalNullable[str] = UNSET + + strict: Optional[bool] = False + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["description", "strict"] + nullable_fields = ["description"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/responseformat.py b/src/mistralai/models/responseformat.py index aa60ba5..17424af 100644 --- a/src/mistralai/models/responseformat.py +++ b/src/mistralai/models/responseformat.py @@ -1,8 +1,10 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from __future__ import annotations +from .jsonschema import JSONSchema, JSONSchemaTypedDict from .responseformats import ResponseFormats -from mistralai.types import BaseModel +from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL +from pydantic import model_serializer from typing import Optional from typing_extensions import NotRequired, TypedDict @@ -10,8 +12,41 @@ class ResponseFormatTypedDict(TypedDict): type: NotRequired[ResponseFormats] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + json_schema: NotRequired[Nullable[JSONSchemaTypedDict]] class ResponseFormat(BaseModel): type: Optional[ResponseFormats] = None r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. 
When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" + + json_schema: OptionalNullable[JSONSchema] = UNSET + + @model_serializer(mode="wrap") + def serialize_model(self, handler): + optional_fields = ["type", "json_schema"] + nullable_fields = ["json_schema"] + null_default_fields = [] + + serialized = handler(self) + + m = {} + + for n, f in self.model_fields.items(): + k = f.alias or n + val = serialized.get(k) + serialized.pop(k, None) + + optional_nullable = k in optional_fields and k in nullable_fields + is_set = ( + self.__pydantic_fields_set__.intersection({n}) + or k in null_default_fields + ) # pylint: disable=no-member + + if val is not None and val != UNSET_SENTINEL: + m[k] = val + elif val != UNSET_SENTINEL and ( + not k in optional_fields or (optional_nullable and is_set) + ): + m[k] = val + + return m diff --git a/src/mistralai/models/responseformats.py b/src/mistralai/models/responseformats.py index 2c06b81..08c3995 100644 --- a/src/mistralai/models/responseformats.py +++ b/src/mistralai/models/responseformats.py @@ -4,5 +4,5 @@ from typing import Literal -ResponseFormats = Literal["text", "json_object"] +ResponseFormats = Literal["text", "json_object", "json_schema"] r"""An object specifying the format that the model must output. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is in JSON. When using JSON mode you MUST also instruct the model to produce JSON yourself with a system or a user message.""" diff --git a/src/mistralai/sdk.py b/src/mistralai/sdk.py index e0ff7ac..e24f158 100644 --- a/src/mistralai/sdk.py +++ b/src/mistralai/sdk.py @@ -1,7 +1,7 @@ """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT.""" from .basesdk import BaseSDK -from .httpclient import AsyncHttpClient, HttpClient +from .httpclient import AsyncHttpClient, ClientOwner, HttpClient, close_clients from .sdkconfiguration import SDKConfiguration from .utils.logger import Logger, get_default_logger from .utils.retries import RetryConfig @@ -18,7 +18,8 @@ from mistralai.fine_tuning import FineTuning from mistralai.models_ import Models from mistralai.types import OptionalNullable, UNSET -from typing import Any, Callable, Dict, Optional, Union +from typing import Any, Callable, Dict, Optional, Union, cast +import weakref class Mistral(BaseSDK): @@ -118,6 +119,14 @@ def __init__( # pylint: disable=protected-access self.sdk_configuration.__dict__["_hooks"] = hooks + weakref.finalize( + self, + close_clients, + cast(ClientOwner, self.sdk_configuration), + self.sdk_configuration.client, + self.sdk_configuration.async_client, + ) + self._init_sdks() def _init_sdks(self):
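
Taken together, the changes above wire up the Structured Outputs feature end to end: `ResponseFormats` gains a `json_schema` value, `JSONSchema` carries the strict schema payload, and the custom `chat.parse`/`chat.parse_async` methods derive that schema from a Pydantic model, run the completion, and hand back the parsed instance. A minimal end-to-end sketch, condensed from `examples/structured_outputs.py` in this diff (assumes `MISTRAL_API_KEY` is set and the `mistral-large-latest` model is available):

```python
import os
from pydantic import BaseModel

from mistralai import Mistral


class Explanation(BaseModel):
    explanation: str
    output: str


class MathDemonstration(BaseModel):
    steps: list[Explanation]
    final_answer: str


client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

# chat.parse builds a strict json_schema response format from MathDemonstration
# via response_format_from_pydantic_model, calls chat.complete, and returns a
# ParsedChatCompletionResponse whose message.parsed is a MathDemonstration.
response = client.chat.parse(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "How can I solve 8x + 7 = -23?"}],
    response_format=MathDemonstration,
)
print(response.choices[0].message.parsed)
```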