diff --git a/.github/codeql/javascript-config.yml b/.github/codeql/javascript-config.yml
new file mode 100644
index 00000000..4631b043
--- /dev/null
+++ b/.github/codeql/javascript-config.yml
@@ -0,0 +1,4 @@
+name: "CodeQL config for JavaScript"
+
+paths:
+  - frontend/src/**
diff --git a/.github/codeql/python-config.yml b/.github/codeql/python-config.yml
new file mode 100644
index 00000000..25903f95
--- /dev/null
+++ b/.github/codeql/python-config.yml
@@ -0,0 +1 @@
+name: "CodeQL config for Python"
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000..fb76d794
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,58 @@
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ main, 'b[0-9].[0-9]+' ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ main, 'b[0-9].[0-9]+' ]
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'javascript', 'python' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+        # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      # Initializes the CodeQL tools for scanning.
+      - name: Initialize CodeQL
+        uses: github/codeql-action/init@v2
+        with:
+          languages: ${{ matrix.language }}
+          config-file: ./.github/codeql/${{ matrix.language }}-config.yml
+          # If you wish to specify custom queries, you can do so here or in a config file.
+          # By default, queries listed here will override any specified in a config file.
+          # Prefix the list here with "+" to use these queries and those in the config file.
+          # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+      # If this step fails, then you should remove it and run the build manually (see below)
+      #- name: Autobuild
+      #  uses: github/codeql-action/autobuild@v2
+
+      # ℹī¸ Command-line programs to run using the OS shell.
+      # 📚 https://git.io/JvXDl
+
+      # ✏ī¸ If the Autobuild fails above, remove it and uncomment the following three lines
+      #    and modify them (or add more) to build your code if your project
+      #    uses a compiled language
+
+      #- run: |
+      #    make bootstrap
+      #    make release
+
+      - name: Perform CodeQL Analysis
+        uses: github/codeql-action/analyze@v2
diff --git a/.github/workflows/poetry-check.yaml b/.github/workflows/poetry-check.yaml
index 48a76754..71a20b7f 100644
--- a/.github/workflows/poetry-check.yaml
+++ b/.github/workflows/poetry-check.yaml
@@ -15,6 +15,6 @@ jobs:
       - name: verify poetry instalation
         run: poetry --version
         working-directory: ./backend
-      - name: verify poetry lockfile
-        run: poetry check --lock
-        working-directory: ./backend
\ No newline at end of file
+      - name: verify poetry configuration
+        run: poetry check
+        working-directory: ./backend
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
new file mode 100644
index 00000000..42f1a714
--- /dev/null
+++ b/.github/workflows/test.yaml
@@ -0,0 +1,51 @@
+name: Python checks
+
+on:
+  push:
+    branches: ["main"]
+    tags-ignore: ["**"]
+  pull_request:
+
+env:
+  COVERAGE: ${{ github.workspace }}/coverage
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.9.20"]
+
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install "tox>=4.19"
+      - name: Check for lint
+        # Report errors but don't fail until we achieve stability!
+        continue-on-error: true
+        run: |
+          cd backend
+          tox -e format,isort,lint
+      - name: Run unit tests
+        run: |
+          cd backend
+          tox -e unit
+      - name: Add coverage data to job summary
+        run: cat $COVERAGE/coverage.txt >> $GITHUB_STEP_SUMMARY
+      - name: Publish coverage data
+        uses: actions/upload-artifact@v4
+        with:
+          name: Coverage for ${{ github.event.head_commit.id }}
+          path: ${{ env.COVERAGE }}/html
+          if-no-files-found: warn
+          retention-days: 30
+      - name: Run functional tests in pod
+        run: |
+          cd backend
+          tests/functional/setup/test.sh
diff --git a/README.md b/README.md
index 540ebdda..43b3ca21 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,11 @@ indice=
 username=
 password=
 
+[.crucible]
+url=
+username=
+password=
+
 [ocp-server]
 port=8000
 
@@ -53,7 +58,7 @@ indice=
 username=
 password=
 ```
-**Note: The below applies only for the elastic search at the moment** 
+**Note: The below applies only to Elasticsearch at the moment**
 If you also have an archived internal instance that keeps track of older data, it can be specified with '.internal' suffix.
 Example of our `OCP` internal archived instance's configuration.
```toml
[ocp.elasticsearch.internal]
diff --git a/backend/app/api/api.py b/backend/app/api/api.py
index b74b8ad4..c37735dc 100644
--- a/backend/app/api/api.py
+++ b/backend/app/api/api.py
@@ -1,3 +1,4 @@
+import sys
 from fastapi import APIRouter
 
 from app.api.v1.endpoints.ocp import results
@@ -11,6 +12,7 @@
 from app.api.v1.endpoints.telco import telcoJobs
 from app.api.v1.endpoints.telco import telcoGraphs
 from app.api.v1.endpoints.ocm import ocmJobs
+from app.api.v1.endpoints.ilab import ilab
 
 router = APIRouter()
 
@@ -39,3 +41,6 @@
 
 # OCM endpoint
 router.include_router(ocmJobs.router, tags=['ocm'])
+
+# InstructLab endpoint
+router.include_router(router=ilab.router, tags=['ilab'])
diff --git a/backend/app/api/v1/endpoints/ilab/ilab.py b/backend/app/api/v1/endpoints/ilab/ilab.py
new file mode 100644
index 00000000..f75950e1
--- /dev/null
+++ b/backend/app/api/v1/endpoints/ilab/ilab.py
@@ -0,0 +1,719 @@
+"""Access RHEL AI InstructLab performance data through Crucible
+
+This defines an API to expose and filter performance data from InstructLab
+CPT runs via a persistent Crucible controller instance as defined in the
+configuration path "ilab.crucible".
+"""
+
+from datetime import datetime, timedelta, timezone
+from typing import Annotated, Any, Optional
+
+from fastapi import APIRouter, Depends, Query
+
+from app.services.crucible_svc import CrucibleService, Graph, GraphList
+
+router = APIRouter()
+
+
+CONFIGPATH = "ilab.crucible"
+
+
+def example_response(response) -> dict[str, Any]:
+    return {"content": {"application/json": {"example": response}}}
+
+
+def example_error(message: str) -> dict[str, Any]:
+    return example_response({"message": message})
+
+
+async def crucible_svc():
+    crucible = None
+    try:
+        crucible = CrucibleService(CONFIGPATH)
+        yield crucible
+    finally:
+        if crucible:
+            await crucible.close()
+
+
+@router.get(
+    "/api/v1/ilab/runs/filters",
+    summary="Returns possible filters",
+    description=(
+        "Returns a nested JSON object with all parameter and tag filter terms"
+    ),
+    responses={
+        200: example_response(
+            {
+                "param": {
+                    "model": [
+                        "/home/models/granite-7b-redhat-lab",
+                        "/home/models/granite-7b-lab/",
+                        "/home/models/Mixtral-8x7B-Instruct-v0.1",
+                    ],
+                    "gpus": ["4"],
+                    "workflow": ["train", "sdg", "train+eval"],
+                    "data-path": [
+                        "/home/data/training/jun12-phase05.jsonl",
+                        "/home/data/training/knowledge_data.jsonl",
+                        "/home/data/training/jul19-knowledge-26k.jsonl",
+                        "/home/data/jun12-phase05.jsonl",
+                    ],
+                    "nnodes": ["1"],
+                    "train-until": ["checkpoint:1", "complete"],
+                    "save-samples": ["5000", "2500", "10000"],
+                    "deepspeed-cpu-offload-optimizer": ["0", "1"],
+                    "deepspeed-cpu-offload-optimizer-pin-memory": ["0", "1"],
+                    "batch-size": ["4", "8", "16", "12", "0"],
+                    "cpu-offload-optimizer": ["1"],
+                    "cpu-offload-pin-memory": ["1"],
+                    "nproc-per-node": ["4"],
+                    "num-runavg-samples": ["2", "6"],
+                    "num-cpus": ["30"],
+                },
+                "tag": {"topology": ["none"]},
+            }
+        )
+    },
+)
+async def run_filters(crucible: Annotated[CrucibleService, Depends(crucible_svc)]):
+    return await crucible.get_run_filters()
+
+
+@router.get(
+    "/api/v1/ilab/runs",
+    summary="Returns a list of InstructLab runs",
+    description="Returns a list of runs summary documents.",
+    responses={
+        200: example_response(
+            {
+                "results": [
+                    {
+                        "benchmark": "ilab",
+                        "email": "rhel-ai-user@example.com",
+                        "id": "bd72561c-cc20-400b-b6f6-d9534a60033a",
+                        "name": '"RHEL-AI User"',
+                        "source": "n42-h01-b01-mx750c.example.com//var/lib/crucible/run/ilab--2024-09-11_19:43:53_UTC--bd72561c-cc20-400b-b6f6-d9534a60033a",
"n42-h01-b01-mx750c.example.com//var/lib/crucible/run/ilab--2024-09-11_19:43:53_UTC--bd72561c-cc20-400b-b6f6-d9534a60033a", + "status": "pass", + "begin_date": "1970-01-01 00:00:00+00:00", + "end_date": "1970-01-01 00:00:00+00:00", + "params": { + "gpus": "4", + "model": "/home/models/Mixtral-8x7B-Instruct-v0.1", + "workflow": "sdg", + }, + "iterations": [ + { + "iteration": 1, + "primary_metric": "ilab::sdg-samples-sec", + "primary_period": "measurement", + "status": "pass", + "params": { + "gpus": "4", + "model": "/home/models/Mixtral-8x7B-Instruct-v0.1", + "workflow": "sdg", + }, + } + ], + "primary_metrics": ["ilab::sdg-samples-sec"], + "tags": {"topology": "none"}, + } + ], + "count": 5, + "total": 21, + "startDate": "2024-08-19 20:42:52.239000+00:00", + "endDate": "2024-09-18 20:42:52.239000+00:00", + } + ), + 400: example_error( + "sort key 'bad' must be one of begin,benchmark,email,end,id,name,source,status" + ), + 422: example_error( + "invalid date format, start_date must be less than end_date" + ), + }, +) +async def runs( + crucible: Annotated[CrucibleService, Depends(crucible_svc)], + all: Annotated[ + bool, + Query(description="Don't apply default date range", examples=["all=true"]), + ] = False, + start_date: Annotated[ + Optional[str], + Query(description="Start time for search", examples=["2020-11-10"]), + ] = None, + end_date: Annotated[ + Optional[str], + Query(description="End time for search", examples=["2020-11-10"]), + ] = None, + filter: Annotated[ + Optional[list[str]], + Query( + description="Filter terms", examples=["tag:name=value", "param:name=value"] + ), + ] = None, + sort: Annotated[ + Optional[list[str]], + Query(description="Sort terms", examples=["start:asc", "status:desc"]), + ] = None, + size: Annotated[ + Optional[int], Query(description="Number of runs in a page", examples=[10]) + ] = None, + offset: Annotated[ + int, + Query(description="Page offset to start", examples=[10]), + ] = 0, +): + if not all and start_date is None and end_date is None: + now = datetime.now(timezone.utc) + start = now - timedelta(days=30) + end = now + else: + start = start_date + end = end_date + return await crucible.get_runs( + start=start, end=end, filter=filter, sort=sort, size=size, offset=offset + ) + + +@router.get( + "/api/v1/ilab/runs/{run}/tags", + summary="Returns the Crucible tags for a run", + description="Returns tags for a specified Run ID.", + responses={ + 200: example_response({"topology": "none"}), + 400: example_error("Parameter error"), + }, +) +async def tags(crucible: Annotated[CrucibleService, Depends(crucible_svc)], run: str): + return await crucible.get_tags(run) + + +@router.get( + "/api/v1/ilab/runs/{run}/params", + summary="Returns the InstructLab parameters for a run", + description="Returns params for a specified Run ID by iteration plus common params.", + responses={ + 200: example_response( + { + "9D5AB7D6-510A-11EF-84ED-CCA69E6B5B5B": { + "num-runavg-samples": "2", + "cpu-offload-pin-memory": "1", + "nnodes": "1", + "cpu-offload-optimizer": "1", + "data-path": "/home/data/training/knowledge_data.jsonl", + "model": "/home/models/granite-7b-lab/", + "nproc-per-node": "4", + }, + "common": { + "num-runavg-samples": "2", + "cpu-offload-pin-memory": "1", + "nnodes": "1", + "cpu-offload-optimizer": "1", + "data-path": "/home/data/training/knowledge_data.jsonl", + "model": "/home/models/granite-7b-lab/", + "nproc-per-node": "4", + }, + } + ), + 400: example_error("Parameter error"), + }, +) +async def params(crucible: Annotated[CrucibleService, 
+    return await crucible.get_params(run)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/iterations",
+    summary="Returns a list of InstructLab run iterations",
+    description="Returns a list of iterations for a specified Run ID.",
+    responses={
+        200: example_response(
+            [
+                {
+                    "id": "6B98F650-7139-11EF-BB69-98B53E962BD1",
+                    "num": 2,
+                    "path": None,
+                    "primary-metric": "ilab::sdg-samples-sec",
+                    "primary-period": "measurement",
+                    "status": "pass",
+                },
+                {
+                    "id": "6B99173E-7139-11EF-9434-F8BB3B1B9CFC",
+                    "num": 5,
+                    "path": None,
+                    "primary-metric": "ilab::sdg-samples-sec",
+                    "primary-period": "measurement",
+                    "status": "pass",
+                },
+            ]
+        ),
+        400: example_error("Parameter error"),
+    },
+)
+async def iterations(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)], run: str
+):
+    return await crucible.get_iterations(run)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/samples",
+    summary="Returns a list of InstructLab run samples",
+    description="Returns a list of samples for a specified Run ID.",
+    responses={
+        200: example_response(
+            [
+                {
+                    "id": "6BBE6872-7139-11EF-BFAA-8569A9399D61",
+                    "num": "1",
+                    "path": None,
+                    "status": "pass",
+                    "iteration": 5,
+                    "primary_metric": "ilab::sdg-samples-sec",
+                },
+                {
+                    "id": "6BACDFA8-7139-11EF-9F33-8185DD5B4869",
+                    "num": "1",
+                    "path": None,
+                    "status": "pass",
+                    "iteration": 2,
+                    "primary_metric": "ilab::sdg-samples-sec",
+                },
+            ]
+        ),
+        400: example_error("Parameter error"),
+    },
+)
+async def run_samples(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)], run: str
+):
+    return await crucible.get_samples(run)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/periods",
+    summary="Returns a list of InstructLab run periods",
+    description="Returns a list of periods for a specified Run ID.",
+    responses={
+        200: example_response(
+            [
+                {
+                    "begin": "2024-09-12 17:40:27.982000+00:00",
+                    "end": "2024-09-12 18:03:23.132000+00:00",
+                    "id": "6BA57EF2-7139-11EF-A80B-E5037504B9B1",
+                    "name": "measurement",
+                    "iteration": 1,
+                    "sample": "1",
+                    "primary_metric": "ilab::sdg-samples-sec",
+                    "status": "pass",
+                },
+                {
+                    "begin": "2024-09-12 18:05:03.229000+00:00",
+                    "end": "2024-09-12 18:27:55.419000+00:00",
+                    "id": "6BB93622-7139-11EF-A6C0-89A48E630F9D",
+                    "name": "measurement",
+                    "iteration": 4,
+                    "sample": "1",
+                    "primary_metric": "ilab::sdg-samples-sec",
+                    "status": "pass",
+                },
+            ]
+        ),
+        400: example_error("Parameter error"),
+    },
+)
+async def run_periods(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)], run: str
+):
+    return await crucible.get_periods(run)
+
+
+@router.get(
+    "/api/v1/ilab/iterations/{iteration}/samples",
+    summary="Returns a list of InstructLab iteration samples",
+    description="Returns a list of samples for a specified iteration ID.",
+    responses={
+        200: example_response(
+            [
+                {
+                    "id": "6BBE6872-7139-11EF-BFAA-8569A9399D61",
+                    "num": "1",
+                    "path": None,
+                    "status": "pass",
+                    "iteration": 5,
+                    "primary_metric": "ilab::sdg-samples-sec",
+                },
+                {
+                    "id": "6BACDFA8-7139-11EF-9F33-8185DD5B4869",
+                    "num": "1",
+                    "path": None,
+                    "status": "pass",
+                    "iteration": 2,
+                    "primary_metric": "ilab::sdg-samples-sec",
+                },
+            ]
+        ),
+        400: example_error("Parameter error"),
+    },
+)
+async def iteration_samples(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)], iteration: str
+):
+    return await crucible.get_samples(iteration=iteration)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/metrics",
+    summary="Describe the metrics collected for a run",
+    description="Returns metric labels along with breakout names and values.",
+    responses={
+        200: example_response(
+            {
+                "sar-net::packets-sec": {
+                    "periods": [],
+                    "breakouts": {
+                        "benchmark-name": ["none"],
+                        "benchmark-role": ["none"],
+                        "csid": ["remotehosts-1-sysstat-1"],
+                        "cstype": ["profiler"],
+                        "dev": ["lo", "eno8303", "eno12399", "eno12409"],
+                        "direction": ["rx", "tx"],
+                        "endpoint-label": ["remotehosts-1"],
+                        "engine-id": ["remotehosts-1-sysstat-1"],
+                        "engine-role": ["profiler"],
+                        "engine-type": ["profiler"],
+                        "hosted-by": ["x.example.com"],
+                        "hostname": ["x.example.com"],
+                        "hypervisor-host": ["none"],
+                        "osruntime": ["podman"],
+                        "tool-name": ["sysstat"],
+                        "type": ["virtual", "physical"],
+                        "userenv": ["rhel-ai"],
+                    },
+                },
+            },
+        ),
+        400: example_error("Parameter error"),
+    },
+)
+async def metrics(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)], run: str
+):
+    return await crucible.get_metrics_list(run)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/breakouts/{metric}",
+    summary="Returns breakout options for a metric",
+    description="Describes the breakout names and available values for a run.",
+    responses={
+        200: example_response(
+            {
+                "label": "mpstat::Busy-CPU",
+                "class": ["throughput"],
+                "type": "Busy-CPU",
+                "source": "mpstat",
+                "breakouts": {"num": ["8", "72"], "thread": [0, 1]},
+            }
+        ),
+        400: example_error("Metric name <name> not found for run <id>"),
+    },
+)
+async def metric_breakouts(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)],
+    run: str,
+    metric: str,
+    name: Annotated[
+        Optional[list[str]],
+        Query(
+            description="List of name[=value] to match",
+            examples=["cpu=10", "cpu=10,cpu=110"],
+        ),
+    ] = None,
+    period: Annotated[
+        Optional[list[str]],
+        Query(
+            description="List of periods to match",
+            examples=["<id>", "<id1>,<id2>"],
+        ),
+    ] = None,
+):
+    return await crucible.get_metric_breakouts(run, metric, names=name, periods=period)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/data/{metric}",
+    summary="Returns metric data collected for a run",
+    description="Returns data collected for a specified Run ID metric.",
+    responses={
+        200: example_response(
+            [
+                {
+                    "begin": "2024-08-22 20:04:05.072000+00:00",
+                    "end": "2024-08-22 20:04:19.126000+00:00",
+                    "duration": 14.055,
+                    "value": 9.389257233311497,
+                },
+                {
+                    "begin": "2024-08-22 20:04:19.127000+00:00",
+                    "end": "2024-08-22 20:04:32.889000+00:00",
+                    "duration": 13.763,
+                    "value": 9.552584444155011,
+                },
+            ]
+        ),
+        400: example_error("No matches for ilab::train-samples-sec+cpu=10"),
+        422: example_response(
+            response={
+                "detail": [
+                    {
+                        "message": "More than one metric (2) probably means you should add filters",
+                        "names": {"dev": ["sdb", "sdb3"]},
+                        "periods": [],
+                    }
+                ]
+            }
+        ),
+    },
+)
+async def metric_data(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)],
+    run: str,
+    metric: str,
+    name: Annotated[
+        Optional[list[str]],
+        Query(
+            description="List of name[=value] to match",
+            examples=["cpu=10", "cpu=10,cpu=110"],
+        ),
+    ] = None,
+    period: Annotated[
+        Optional[list[str]],
+        Query(
+            description="List of periods to match",
+            examples=["<id>", "<id1>,<id2>"],
+        ),
+    ] = None,
+    aggregate: Annotated[
+        bool, Query(description="Allow aggregation of metrics")
+    ] = False,
+):
+    return await crucible.get_metrics_data(
+        run, metric, names=name, periods=period, aggregate=aggregate
+    )
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/summary/{metric}",
+    summary="Returns a metric summary for a run",
+    description="Returns a statistical summary of data collected for a specified Run ID metric.",
+    responses={
+        200: example_response(
+            {
+                "count": 234,
+                "min": 7.905045031896648,
+                "max": 9.666444615077308,
+                "avg": 9.38298722585416,
+                "sum": 2195.6190108498736,
+            }
+        ),
+        400: example_error("No matches for ilab::train-samples-sec+cpu=10"),
+        422: example_response(
+            response={
+                "detail": [
+                    {
+                        "message": "More than one metric (2) probably means you should add filters",
+                        "names": {"dev": ["sdb", "sdb3"]},
+                        "periods": [],
+                    }
+                ]
+            }
+        ),
+    },
+)
+async def metric_summary(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)],
+    run: str,
+    metric: str,
+    name: Annotated[
+        Optional[list[str]],
+        Query(
+            description="List of name[=value] to match",
+            examples=["cpu=10", "cpu=10,cpu=110"],
+        ),
+    ] = None,
+    period: Annotated[
+        Optional[list[str]],
+        Query(
+            description="List of periods to match",
+            examples=["<id>", "<id1>,<id2>"],
+        ),
+    ] = None,
+):
+    return await crucible.get_metrics_summary(run, metric, names=name, periods=period)
+
+
+@router.post(
+    "/api/v1/ilab/runs/multigraph",
+    summary="Returns overlaid Plotly graph objects",
+    description="Returns metric data in a form usable by the Plot React component.",
+    responses={
+        200: example_response(
+            response={
+                "data": [
+                    {
+                        "x": [
+                            "2024-09-05 21:50:07+00:00",
+                            "2024-09-05 21:56:37+00:00",
+                            "2024-09-05 21:56:37.001000+00:00",
+                            "2024-09-05 21:56:52+00:00",
+                            "2024-09-05 21:56:52.001000+00:00",
+                            "2024-09-05 22:01:52+00:00",
+                        ],
+                        "y": [0.0, 0.0, 0.33, 0.33, 0.0, 0.0],
+                        "name": "iostat::operations-merged-sec [cmd=read,dev=sdb]",
+                        "type": "scatter",
+                        "mode": "line",
+                        "marker": {"color": "black"},
+                        "labels": {"x": "sample timestamp", "y": "samples / second"},
+                        "yaxis": "y",
+                    },
+                    {
+                        "x": [
+                            "2024-09-05 21:50:07+00:00",
+                            "2024-09-05 21:56:37+00:00",
+                            "2024-09-05 21:56:37.001000+00:00",
+                            "2024-09-05 21:56:52+00:00",
+                            "2024-09-05 21:56:52.001000+00:00",
+                            "2024-09-05 22:01:52+00:00",
+                        ],
+                        "y": [0.0, 0.0, 0.33, 0.33, 0.0, 0.0],
+                        "name": "iostat::operations-merged-sec [dev=sdb,cmd=read]",
+                        "type": "scatter",
+                        "mode": "line",
+                        "marker": {"color": "purple"},
+                        "labels": {"x": "sample timestamp", "y": "samples / second"},
+                        "yaxis": "y",
+                    },
+                ],
+                "layout": {
+                    "width": "1500",
+                    "yaxis": {
+                        "title": "iostat::operations-merged-sec",
+                        "color": "black",
+                    },
+                },
+            }
+        ),
+        400: example_error("No matches for ilab::train-samples-sec"),
+        422: example_response(
+            response={
+                "detail": [
+                    {
+                        "message": "More than one metric (2) probably means you should add filters",
+                        "names": {"dev": ["sdb", "sdb3"]},
+                        "periods": [],
+                    }
+                ]
+            }
+        ),
+    },
+)
+async def metric_graph_body(
+    crucible: Annotated[CrucibleService, Depends(crucible_svc)], graphs: GraphList
+):
+    return await crucible.get_metrics_graph(graphs)
+
+
+@router.get(
+    "/api/v1/ilab/runs/{run}/graph/{metric}",
+    summary="Returns a single Plotly graph object for a run",
+    description="Returns metric data in a form usable by the Plot React component.",
+    responses={
+        200: example_response(
+            response={
+                "data": [
+                    {
+                        "x": [
+                            "2024-09-12 16:49:01+00:00",
+                            "2024-09-12 18:04:31+00:00",
+                            "2024-09-12 18:04:31.001000+00:00",
+                            "2024-09-12 18:04:46+00:00",
+                            "2024-09-12 18:04:46.001000+00:00",
+                            "2024-09-12 18:53:16+00:00",
+                        ],
+                        "y": [0.0, 0.0, 1.4, 1.4, 0.0, 0.0],
+                        "name": "iostat::operations-merged-sec [cmd=read,dev=sda]",
+                        "type": "scatter",
+                        "mode": "line",
+                        "marker": {"color": "black"},
+                        "labels": {
+                            "x": "sample timestamp",
+                            "y": "samples / second",
+                        },
+                        "yaxis": "y",
+                    }
+                ],
+                "layout": {
+                    "width": "1500",
+                    "yaxis": {
+                        "title": "iostat::operations-merged-sec",
"iostat::operations-merged-sec", + "color": "black", + }, + }, + } + ), + 400: example_error("No matches for ilab::train-samples-sec"), + 422: example_response( + response={ + "detail": [ + { + "message": "More than one metric (2) probably means you should add filters", + "names": {"dev": ["sdb", "sdb3"]}, + "periods": [], + } + ] + } + ), + }, +) +async def metric_graph_param( + crucible: Annotated[CrucibleService, Depends(crucible_svc)], + run: str, + metric: str, + aggregate: Annotated[ + bool, Query(description="Allow aggregation of metrics") + ] = False, + name: Annotated[ + Optional[list[str]], + Query( + description="List of name[=key] to match", + examples=["cpu=10", "cpu=10,cpu=110"], + ), + ] = None, + period: Annotated[ + Optional[list[str]], + Query( + description="List of periods to match", + examples=["", ","], + ), + ] = None, + title: Annotated[Optional[str], Query(description="Title for graph")] = None, +): + return await crucible.get_metrics_graph( + GraphList( + run=run, + name=metric, + graphs=[ + Graph( + metric=metric, + aggregate=aggregate, + names=name, + periods=period, + title=title, + ) + ], + ) + ) diff --git a/backend/app/services/crucible_readme.md b/backend/app/services/crucible_readme.md new file mode 100644 index 00000000..0ac2e71f --- /dev/null +++ b/backend/app/services/crucible_readme.md @@ -0,0 +1,164 @@ +Crucible divides data across a set of OpenSearch (or ElasticSearch) indices, +each with a specific document mapping. CDM index names include a "root" name +(like "run") with a versioned prefix, like "cdmv7dev-run". + +Crucible timestamps are integers in "millisecond-from-the-epoch" format. + +The Crucible CDM hierarchy is roughly: + +- RUN (an instrumented benchmark run) + - TAG (metadata) + - ITERATION (a benchmark interval) + - PARAM (execution parameters) + - SAMPLE + - PERIOD (time range where data is recorded) + - METRIC_DESC (description of a specific recorded metric) + - METRIC_DATA (a specific recorded data point) + +OpenSearch doesn't support the concept of a SQL "join", but many of the indices +contain documents that could be considered a static "join" with parent documents +for convenience. For example, each `iteration` document contains a copy of it's +parent `run` document, while the `period` document contains copies of its parent +`sample`, `iteration`, and `run` documents. This means, for example, that it's +possible to make a single query returning all `period` documents for specific +iteration number of a specific run. + +
+
+- **RUN**: this contains the basic information about a performance run,
+  including a generated UUID, begin and end timestamps, a benchmark name, a
+  user name and email, the (host/directory) "source" of the indexed data
+  (which is usable on the controller's local file system), plus host and test
+  harness names.
+- **TAG**: this contains information about a general purpose "tag" to
+  associate some arbitrary context with a run, for example software versions,
+  hardware, or other metadata. This can be considered a SQL JOIN with the run
+  document, adding a tag UUID, name, and value.
+- **ITERATION**: this contains basic information about a performance run
+  iteration, including the iteration UUID, number, the primary (benchmark)
+  metric associated with the iteration, plus the primary "period" of the
+  iteration, and the iteration status.
+- **PARAM**: this defines a key/value pair specifying behavior of the
+  benchmark script for an iteration. Parameters are iteration-specific, but
+  parameters that don't vary between iterations are often represented as run
+  parameters.
+- **SAMPLE**: this contains basic information about a sample of an iteration,
+  including a sample UUID and sample number, along with a "path" for sample
+  data and a sample status.
+- **PERIOD**: this contains basic information about a period during which data
+  is collected within a sample, including the period UUID, name, and begin and
+  end timestamps. A set of periods can be "linked" through a "prev_id" field.
+- **METRIC_DESC**: this contains descriptive data about a specific series of
+  metric values within a specific period of a run, including the metric UUID,
+  the metric "class", type, and source, along with a set of "names" (key/value
+  pairs) defining the metric breakout details that narrow down a specific
+  source and type. For example, source:mpstat, type:Busy-CPU data is broken
+  down by package, cpu, core, and other breakouts which can be isolated or
+  aggregated for data reporting.
+- **METRIC_DATA**: this describes a specific data point, sampled over a
+  specified duration with a fixed begin and end timestamp, plus a floating
+  point value. Each is tied to a specific metric_desc UUID value. Depending on
+  the varied semantics of metric_desc breakouts, it's often valid to aggregate
+  these across a set of related metric_desc IDs, based on source and type, for
+  example to get aggregate CPU load across all modes, cores, or across all
+  modes within a core. This service allows arbitrary aggregation within a
+  given metric source and type, but by default will attempt to direct the
+  caller to specifying a set of breakouts that result in a single metric_desc
+  ID.
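+
+To make the metric hierarchy concrete, here is a rough sketch of a
+`metric_desc` document and one associated `metric_data` document, abbreviated
+to the fields described above (all values are hypothetical placeholders):
+
+```python
+metric_desc = {
+    "run": {"id": "<run-id>"},
+    "period": {"id": "<period-id>"},
+    "metric_desc": {
+        "id": "<desc-id>",
+        "class": "throughput",
+        "source": "mpstat",
+        "type": "Busy-CPU",
+        # breakout names narrowing this series to one CPU thread
+        "names": {"package": "1", "core": "12", "type": "sys"},
+    },
+}
+metric_data = {
+    "metric_desc": {"id": "<desc-id>"},  # ties the point to its series
+    "metric_data": {
+        "begin": "1726165775123",  # milliseconds-from-the-epoch, as strings
+        "end": "1726165790123",
+        "duration": "15000",
+        "value": "9.38",
+    },
+}
+```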
+
+The `crucible_svc` allows CPT project APIs to access a Crucible CDM backing
+store to find information about runs, tags, params, iterations, samples,
+periods, plus various ways to expose and aggregate metric data both for
+primary benchmarks and non-periodic tools.
+
+The `get_runs` API is the primary entry point, returning an object that
+supports filtering, sorting, and pagination of the Crucible run data decorated
+with useful iteration, tag, and parameter data.
+
+The metrics data APIs (data, breakouts, summary, and graph) now allow
+filtering by the metric "name" data. This allows "drilling down" through
+the non-periodic "tool data". For example, IO data is per-disk, CPU
+information is broken down by core and package. You can now aggregate
+all global data (e.g., total system CPU), or filter by breakout names to
+select by CPU, mode (usr, sys, irq), etc.
+
+For example, to return `Busy-CPU` ("type") graph data from the `mpstat`
+("source") tool for system mode on one core, you might query:
+
+```
+/api/v1/ilab/runs/<id>/graph/mpstat::Busy-CPU?name=core=12,package=1,num=77,type=sys
+```
+
+If you make a `graph`, `data`, or `summary` query that doesn't translate
+to a unique metric, and don't select aggregation, you'll get a diagnostic
+message identifying possible additional filters. For example, with
+`type=sys` removed, that same query will show the available values for
+the `type` breakout name:
+
+```
+{
+  "detail": [
+    {
+      "message": "More than one metric (5) probably means you should add filters",
+      "names": {
+        "type": [
+          "guest",
+          "irq",
+          "soft",
+          "sys",
+          "usr"
+        ]
+      },
+      "periods": []
+    }
+  ]
+}
+```
+
+This capability can be used to build an interactive exploratory UI to
+allow displaying breakout details. The `get_metrics` API will show all
+recorded metrics, along with information about the names and values used
+in those. Metrics that show "names" with more than one value will need to
+be filtered to produce meaningful summaries or graphs.
+
+You can instead aggregate metrics across breakouts using the `?aggregate`
+query parameter, like `GET /api/v1/ilab/runs/<id>/graph/mpstat::Busy-CPU?aggregate`
+which will aggregate all CPU busy data for the system.
+
+Normally you'll want to display data based on sample periods, for example the
+primary period of an iteration, using `?period=<period-id>`. This will
+implicitly constrain the metric data based on the period ID associated with
+the `metric_desc` document *and* the begin/end time period of the selected
+periods. Normally, a benchmark will separate iterations because each is
+run with a different parameter value, and the default graph labeling will
+look for a set of distinct parameters not used by other iterations: for
+example, `mpstat::Busy-CPU (batch-size=16)`.
+
+The `get_breakouts` API can be used to explore the namespace recorded for that
+metric in the specified run. For example,
+
+```
+GET /api/v1/ilab/runs/<id>/breakouts/sar-net::packets-sec?name=direction=rx
+{
+    "label": "sar-net::packets-sec",
+    "source": "sar-net",
+    "type": "packets-sec",
+    "class": [],
+    "names": {
+        "dev": [
+            "lo",
+            "eno12409",
+            "eno12399"
+        ],
+        "type": [
+            "physical",
+            "virtual"
+        ]
+    }
+}
+```
+
+The `get_filters` API reports all the tag and param filter tags and
+values for the runs. These can be used for the `filters` query parameter
+on `get_runs` to restrict the set of runs reported; for example,
+`/api/v1/ilab/runs?filter=param:workflow=sdg` shows only runs with the param
+arg `workflow` set to the value `sdg`.
You can search for a subset of the
+string value using the operator "~" instead of "=". For example,
+`?filter=param:user~user` will match `user` values of "A user" or "The user".
diff --git a/backend/app/services/crucible_svc.py b/backend/app/services/crucible_svc.py
new file mode 100644
index 00000000..6a7556fd
--- /dev/null
+++ b/backend/app/services/crucible_svc.py
@@ -0,0 +1,1982 @@
+"""Service to pull data from a Crucible CDM OpenSearch data store
+
+A set of helper methods to enable a project API to easily process data from a
+Crucible controller's OpenSearch data backend.
+
+This includes paginated, filtered, and sorted lists of benchmark runs, along
+with access to the associated Crucible documents such as iterations, samples,
+and periods. Metric data can be accessed by breakout names, or aggregated by
+breakout subsets or collection periods as raw data points, statistical
+aggregates, or Plotly graph format for UI display.
+"""
+
+from collections import defaultdict
+from dataclasses import dataclass
+from datetime import datetime, timezone
+import logging
+import time
+from typing import Any, Iterator, Optional, Tuple, Union
+
+from elasticsearch import AsyncElasticsearch
+from fastapi import HTTPException, status
+from pydantic import BaseModel
+
+from app import config
+
+
+class Graph(BaseModel):
+    """Describe a single graph
+
+    This represents a JSON object provided by a caller through the get_graph
+    API to describe a specific metric graph.
+
+    The default title (if the field is omitted) is the metric label with a
+    suffix denoting breakout values selected, any unique parameter values
+    in a selected iteration, and (if multiple runs are selected in any Graph
+    list) an indication of the run index. For example,
+    "mpstat::Busy-CPU [core=2,type=usr] (batch-size=16)".
+
+    Fields:
+        metric: the metric label, "ilab::train-samples-sec"
+        aggregate: True to aggregate unspecified breakouts
+        color: CSS color string ("green" or "#008000")
+        names: Lock in breakouts
+        periods: Select metrics for specific test period(s)
+        run: Override the default run ID from GraphList
+        title: Provide a title for the graph. The default is a generated title
+    """
+
+    metric: str
+    aggregate: bool = False
+    color: Optional[str] = None
+    names: Optional[list[str]] = None
+    periods: Optional[list[str]] = None
+    run: Optional[str] = None
+    title: Optional[str] = None
+
+
+class GraphList(BaseModel):
+    """Describe a set of overlaid graphs
+
+    This represents a JSON object provided by a caller through the get_graph
+    API to introduce a set of constrained metrics to be graphed. The "run
+    ID" here provides a default for the embedded Graph objects, and can be
+    omitted if all Graph objects specify a run ID. (This is most useful to
+    select a set of graphs all for a single run ID.)
+
+    Normally the X axis will be the actual sample timestamp values; if you
+    specify relative=True, the X axis will be the duration from the first
+    timestamp of the metric series, in seconds. This allows graphs of similar
+    runs started at different times to be overlaid.
+
+    Fields:
+        run: Specify the (default) run ID
+        name: Specify a name for the set of graphs
+        relative: True for relative timescale in seconds
+        graphs: a list of Graph objects
+    """
+
+    run: Optional[str] = None
+    name: str
+    relative: bool = False
+    graphs: list[Graph]
+
+
+@dataclass
+class Point:
+    """Graph point
+
+    Record the start & end timestamp and value of a metric data point
+    """
+
+    begin: int
+    end: int
+    value: float
+
+
+COLOR_NAMES = [
+    "black",
+    "aqua",
+    "blue",
+    "fuchsia",
+    "gray",
+    "green",
+    "maroon",
+    "navy",
+    "olive",
+    "teal",
+    "silver",
+    "lightskyblue",
+    "mediumspringgreen",
+    "mistyrose",
+    "darkgoldenrod",
+    "cadetblue",
+    "chocolate",
+    "coral",
+    "brown",
+    "bisque",
+    "deeppink",
+    "sienna",
+]
+
+
+@dataclass
+class Term:
+    namespace: str
+    key: str
+    value: str
+
+
+class Parser:
+    """Help parsing filter expressions."""
+
+    def __init__(self, term: str):
+        """Construct an instance to help parse query parameter expressions
+
+        These consist of a sequence of tokens separated by delimiters. Each
+        token may be quoted to allow matching against strings with spaces.
+
+        For example, `param:name="A string"`
+
+        Args:
+            term: A filter expression to parse
+        """
+        self.buffer = term
+        self.context = term
+        self.offset = 0
+
+    def _next_token(
+        self, delimiters: list[str] = [], optional: bool = False
+    ) -> Tuple[str, Union[str, None]]:
+        """Extract the next token from an expression
+
+        Tokens may be quoted; the quotes are removed. For example, the two
+        expressions `'param':"workflow"="sdg"` and `param:workflow=sdg` are
+        identical.
+
+        Args:
+            delimiters: a list of delimiter characters
+            optional: whether the terminating delimiter is optional
+
+        Returns:
+            A tuple consisting of the token and the delimiter (or None if
+            parsing reached the end of the expression and the delimiter was
+            optional)
+        """
+
+        @dataclass
+        class Quote:
+            open: int
+            quote: str
+
+        quoted: list[Quote] = []
+        next_char = None
+        token = ""
+        first_quote = None
+        for o in range(len(self.buffer)):
+            next_char = self.buffer[o]
+            if next_char in delimiters and not quoted:
+                self.buffer = self.buffer[o + 1 :]
+                self.offset += o + 1
+                break
+            elif next_char in ('"', "'"):
+                if o == 0:
+                    first_quote = next_char
+                if quoted and quoted[-1].quote == next_char:
+                    quoted.pop()
+                else:
+                    quoted.append(Quote(o, next_char))
+            token += next_char
+        else:
+            next_char = None
+            if quoted:
+                q = quoted[-1]
+                c = self.context
+                i = q.open + self.offset
+                annotated = c[:i] + "[" + c[i] + "]" + c[i + 1 :]
+                raise HTTPException(
+                    status.HTTP_400_BAD_REQUEST, f"Unterminated quote at {annotated!r}"
+                )
+
+            # If delimiters are specified, and not optional, then we didn't
+            # find one, and that's an error.
+            if not optional and delimiters:
+                raise HTTPException(
+                    status.HTTP_400_BAD_REQUEST,
+                    f"Missing delimiter from {','.join(delimiters)} after {token!r}",
+                )
+            self.buffer = ""
+            self.offset = len(self.context)
+        return (token, next_char) if not first_quote else (token[1:-1], next_char)
+
+
+class CommonParams:
+    """Help with sorting out parameters
+
+    Parameter values are associated with iterations, but often a set of
+    parameters is common across all iterations of a run, and that set can
+    provide useful context.
+
+    This helps to filter out identical parameters across a set of
+    iterations.
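+
+    For example, adding iterations with params {"gpus": "4", "model": "a"}
+    and {"gpus": "4", "model": "b"} marks "model" as omitted, so render()
+    returns just {"gpus": "4"}.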
+    """
+
+    def __init__(self):
+        self.common: dict[str, Any] = {}
+        self.omit = set()
+
+    def add(self, params: dict[str, Any]):
+        """Add a new iteration into the param set
+
+        Mark all parameter keys which don't appear in all iterations, or which
+        have different values in at least one iteration, to be omitted from the
+        merged "common" param set.
+
+        Args:
+            params: the param dictionary of an iteration
+        """
+        if not self.common:
+            self.common.update(params)
+        else:
+            for k, v in self.common.items():
+                if k not in self.omit and (k not in params or v != params[k]):
+                    self.omit.add(k)
+
+    def render(self) -> dict[str, Any]:
+        """Return a new param set with only common params"""
+        return {k: v for k, v in self.common.items() if k not in self.omit}
+
+
+class CrucibleService:
+    """Support convenient generalized access to Crucible data
+
+    This implements access to the "v7" Crucible "Common Data Model" through
+    OpenSearch queries.
+    """
+
+    # OpenSearch massive limit on hits in a single query
+    BIGQUERY = 262144
+
+    # Define the 'run' document fields that support general filtering via
+    # `?filter=<namespace>:<term>`
+    #
+    # TODO: this excludes 'desc', which isn't used by the ilab runs, and needs
+    # different treatment as it's a text field rather than a term. It's not an
+    # immediate priority for ilab, but may be important for general use.
+    RUN_FILTERS = ("benchmark", "email", "name", "source", "harness", "host")
+
+    # Define the keywords for sorting.
+    DIRECTIONS = ("asc", "desc")
+    FIELDS = (
+        "begin",
+        "benchmark",
+        "desc",
+        "email",
+        "end",
+        "harness",
+        "host",
+        "id",
+        "name",
+        "source",
+    )
+
+    # Set up a Logger at class level rather than at each instance creation
+    formatter = logging.Formatter(
+        "%(asctime)s %(process)d:%(thread)d %(levelname)s %(module)s:%(lineno)d %(message)s"
+    )
+    handler = logging.StreamHandler()
+    handler.setFormatter(formatter)
+    logger = logging.getLogger("CrucibleService")
+    logger.addHandler(handler)
+
+    def __init__(self, configpath: str = "crucible"):
+        """Initialize a Crucible CDM (OpenSearch) connection.
+
+        Generally the `configpath` should be scoped, like `ilab.crucible` so
+        that multiple APIs based on access to distinct Crucible controllers can
+        coexist.
+
+        Initialization includes making an "info" call to confirm and record the
+        server response.
+
+        Args:
+            configpath: The Vyper config path (e.g., "ilab.crucible")
+        """
+        self.cfg = config.get_config()
+        self.user = self.cfg.get(configpath + ".username")
+        self.password = self.cfg.get(configpath + ".password")
+        self.auth = (self.user, self.password) if self.user or self.password else None
+        self.url = self.cfg.get(configpath + ".url")
+        self.elastic = AsyncElasticsearch(self.url, basic_auth=self.auth)
+        self.logger.info("Initializing CDM V7 service to %s", self.url)
+
+    @staticmethod
+    def _get_index(root: str) -> str:
+        """Expand the root index name to the full name"""
+        return "cdmv7dev-" + root
+
+    @staticmethod
+    def _get(source: dict[str, Any], fields: list[str], default: Optional[Any] = None):
+        """Safely traverse nested dictionaries with a default value"""
+        r = source
+        last_missing = False
+        for f in fields:
+            last_missing = f not in r
+            r = r.get(f, {})
+        return default if last_missing else r
+
+    @staticmethod
+    def _split_list(alist: Optional[list[str]] = None) -> list[str]:
+        """Split a list of parameters
+
+        For simplicity, the APIs supporting "list" query parameters allow
+        each element in the list to be a comma-separated list of strings.
+        For example, ["a", "b", "c"] is logically the same as ["a,b,c"].
+
+        This method normalizes the second form into the first to simplify life
+        for consumers.
+
+        Args:
+            alist: list of names or name lists
+
+        Returns:
+            A simple list of options
+        """
+        l: list[str] = []
+        if alist:
+            for n in alist:
+                l.extend(n.split(","))
+        return l
+
+    @staticmethod
+    def _normalize_date(value: Optional[Union[int, str, datetime]]) -> int:
+        """Normalize date parameters
+
+        The Crucible data model stores dates as string representations of an
+        integer "milliseconds-from-epoch" value. To allow flexibility, this
+        Crucible service allows incoming dates to be specified as ISO-format
+        strings, as integers, or as the stringified integer.
+
+        That is, "2024-09-12 18:29:35.123000+00:00", "1726165775123", and
+        1726165775123 are identical.
+
+        Args:
+            value: Representation of a date-time value
+
+        Returns:
+            The integer milliseconds-from-epoch equivalent
+        """
+        try:
+            if isinstance(value, int):
+                return value
+            elif isinstance(value, datetime):
+                return int(value.timestamp() * 1000.0)
+            else:
+                # If it's a stringified int, convert & return; otherwise try
+                # to decode as a date string.
+                try:
+                    return int(value)
+                except ValueError:
+                    pass
+                d = datetime.fromisoformat(value)
+                return int(d.timestamp() * 1000.0)
+        except Exception as e:
+            raise HTTPException(
+                status.HTTP_400_BAD_REQUEST,
+                f"Date representation {value} is not valid: {str(e)!r}",
+            )
+
+    @classmethod
+    def _hits(
+        cls, payload: dict[str, Any], fields: Optional[list[str]] = None
+    ) -> Iterator[dict[str, Any]]:
+        """Helper to iterate through OpenSearch query matches
+
+        Iteratively yields the "_source" of each hit. As a convenience, can
+        yield a sub-object of "_source" ... for example, specifying the
+        optional "fields" as ["metric_desc", "id"] will yield the equivalent of
+        hit["_source"]["metric_desc"]["id"]
+
+        Args:
+            payload: OpenSearch response payload
+            fields: Optional sub-fields of "_source"
+
+        Returns:
+            Yields each object from the "greatest hits" list
+        """
+        if "hits" not in payload or not isinstance(payload["hits"], dict):
+            raise HTTPException(
+                status_code=500, detail=f"Attempt to iterate hits for {payload}"
+            )
+        hits = cls._get(payload, ["hits", "hits"], [])
+        for h in hits:
+            source = h["_source"]
+            yield source if not fields else cls._get(source, fields)
+
+    @classmethod
+    def _aggs(
+        cls, payload: dict[str, Any], aggregation: str
+    ) -> Iterator[dict[str, Any]]:
+        """Helper to access OpenSearch aggregations
+
+        Iteratively yields the name and value of each aggregation returned
+        by an OpenSearch query. This can also be used for nested aggregations
+        by specifying an aggregation object.
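+
+        For example, iterating over _aggs(payload, "key") for the tag
+        aggregation built by get_run_filters yields buckets shaped roughly
+        like {"key": "topology", "doc_count": 3, "values": {"buckets": [...]}}.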
+
+        Args:
+            payload: A JSON dict containing an "aggregations" field
+
+        Returns:
+            Yields each aggregation from an aggregation bucket list
+        """
+        if "aggregations" not in payload or not isinstance(
+            payload["aggregations"], dict
+        ):
+            raise HTTPException(
+                status_code=500,
+                detail=f"Attempt to iterate missing aggregations for {payload}",
+            )
+        aggs = payload["aggregations"]
+        if aggregation not in aggs or not isinstance(aggs[aggregation], dict):
+            raise HTTPException(
+                status_code=500,
+                detail=f"Attempt to iterate missing aggregation {aggregation!r} for {payload}",
+            )
+        for agg in cls._get(aggs, [aggregation, "buckets"], []):
+            yield agg
+
+    @staticmethod
+    def _format_timestamp(timestamp: Union[str, int]) -> str:
+        """Convert stringified integer milliseconds-from-epoch to ISO date"""
+        try:
+            ts = int(timestamp)
+        except Exception as e:
+            CrucibleService.logger.warning(
+                "invalid timestamp %r: %r", timestamp, str(e)
+            )
+            ts = 0
+        return str(datetime.fromtimestamp(ts / 1000.00, timezone.utc))
+
+    @classmethod
+    def _format_data(cls, data: dict[str, Any]) -> dict[str, Any]:
+        """Helper to format a "metric_data" object
+
+        Crucible stores the date, duration, and value as strings, so this
+        converts them to more useful values. The end timestamp is converted
+        to an ISO date-time string; the duration and value to floating point
+        numbers.
+
+        Args:
+            data: a "metric_data" object
+
+        Returns:
+            A neatly formatted "metric_data" object
+        """
+        return {
+            "begin": cls._format_timestamp(data["begin"]),
+            "end": cls._format_timestamp(data["end"]),
+            "duration": int(data["duration"]) / 1000,
+            "value": float(data["value"]),
+        }
+
+    @classmethod
+    def _format_period(cls, period: dict[str, Any]) -> dict[str, Any]:
+        """Helper to format a "period" object
+
+        Crucible stores the date values as stringified integers, so this
+        converts the begin and end timestamps to ISO date-time strings.
+
+        Args:
+            period: a "period" object
+
+        Returns:
+            A neatly formatted "period" object
+        """
+        return {
+            "begin": cls._format_timestamp(timestamp=period["begin"]),
+            "end": cls._format_timestamp(period["end"]),
+            "id": period["id"],
+            "name": period["name"],
+        }
+
+    @classmethod
+    def _build_filter_options(cls, filter: Optional[list[str]] = None) -> Tuple[
+        Optional[list[dict[str, Any]]],
+        Optional[list[dict[str, Any]]],
+        Optional[list[dict[str, Any]]],
+    ]:
+        """Build filter terms for tag and parameter filter terms
+
+        Each term has the form "<namespace>:<key><operator><value>". Any term
+        may be quoted: quotes are stripped and ignored. (This is generally only
+        useful on the <value> to include spaces.)
+
+        We support three namespaces:
+            param: Match against param index arg/val
+            tag: Match against tag index name/val
+            run: Match against run index fields
+
+        We support two operators:
+            =: Exact match
+            ~: Partial match
+
+        Args:
+            filter: list of filter terms like "param:key=value"
+
+        Returns:
+            A set of OpenSearch filter object lists to detect missing
+            and matching documents for params, tags, and run fields.
For + example, to select param:batch-size=12 results in the + following param filter list: + + [ + {' + dis_max': { + 'queries': [ + { + 'bool': { + 'must': [ + {'term': {'param.arg': 'batch-size'}}, + {'term': {'param.val': '12'}} + ] + } + } + ] + } + } + ] + """ + terms = defaultdict(list) + for term in cls._split_list(filter): + p = Parser(term) + namespace, _ = p._next_token([":"]) + key, operation = p._next_token(["=", "~"]) + value, _ = p._next_token() + if operation == "~": + value = f".*{value}.*" + matcher = "regexp" + else: + matcher = "term" + if namespace in ("param", "tag"): + if namespace == "param": + key_field = "param.arg" + value_field = "param.val" + else: + key_field = "tag.name" + value_field = "tag.val" + terms[namespace].append( + { + "bool": { + "must": [ + {"term": {key_field: key}}, + {matcher: {value_field: value}}, + ] + } + } + ) + elif namespace == "run": + terms[namespace].append({matcher: {f"run.{key}": value}}) + else: + raise HTTPException( + status.HTTP_400_BAD_REQUEST, + f"unknown filter namespace {namespace!r}", + ) + param_filter = None + tag_filter = None + if "param" in terms: + param_filter = [{"dis_max": {"queries": terms["param"]}}] + if "tag" in terms: + tag_filter = [{"dis_max": {"queries": terms["tag"]}}] + return param_filter, tag_filter, terms.get("run") + + @classmethod + def _build_name_filters( + cls, namelist: Optional[list[str]] = None + ) -> list[dict[str, Any]]: + """Build filter terms for metric breakout names + + for example, "cpu=10" filters for metric data descriptors where the + breakout name "cpu" exists and has a value of 10. + + Args: + namelist: list of possibly comma-separated list values + + Returns: + A list of filters to match breakout terms + """ + names: list[str] = cls._split_list(namelist) + filters = [] + for e in names: + try: + n, v = e.split("=", maxsplit=1) + except ValueError: + raise HTTPException( + status.HTTP_400_BAD_REQUEST, f"Filter item {e!r} must be '='" + ) + filters.append({"term": {f"metric_desc.names.{n}": v}}) + return filters + + @classmethod + def _build_period_filters( + cls, periodlist: Optional[list[str]] = None + ) -> list[dict[str, Any]]: + """Build period filters + + Generate metric_desc filter terms to match against a list of period IDs. + + Note that not all metric descriptions are periodic, and we don't want + these filters to exclude them -- so the filter will exclude only + documents that have a period and don't match. (That is, we won't drop + any non-periodic metrics. We expect those to be filtered by timestamp + instead.) + + Args: + period: list of possibly comma-separated period IDs + + Returns: + A filter term that requires a period.id match only for metric_desc + documents with a period. + """ + pl: list[str] = cls._split_list(periodlist) + if pl: + return [ + { + "dis_max": { + "queries": [ + {"bool": {"must_not": {"exists": {"field": "period"}}}}, + {"terms": {"period.id": pl}}, + ] + } + } + ] + else: + return [] + + @classmethod + def _build_metric_filters( + cls, + run: str, + metric: str, + names: Optional[list[str]] = None, + periods: Optional[list[str]] = None, + ) -> list[dict[str, Any]]: + """Helper for filtering metric descriptions + + We normally filter by run, metric "label", and optionally by breakout + names and periods. This encapsulates the filter construction. 
+
+        Args:
+            run: run ID
+            metric: metric label (ilab::sdg-samples-sec)
+            names: list of "name=value" filters
+            periods: list of period IDs
+
+        Returns:
+            A list of OpenSearch filter expressions
+        """
+        msource, mtype = metric.split("::")
+        return (
+            [
+                {"term": {"run.id": run}},
+                {"term": {"metric_desc.source": msource}},
+                {"term": {"metric_desc.type": mtype}},
+            ]
+            + cls._build_name_filters(names)
+            + cls._build_period_filters(periods)
+        )
+
+    @classmethod
+    def _build_sort_terms(cls, sorters: Optional[list[str]]) -> list[dict[str, Any]]:
+        """Build sort term list
+
+        Sorters may reference any native `run` index field and must specify
+        either "asc"(ending) or "desc"(ending) sort order. Any number of
+        sorters may be combined, like ["name:asc,benchmark:desc", "end:desc"]
+
+        Args:
+            sorters: list of <key>:<direction> sort terms
+
+        Returns:
+            list of OpenSearch sort terms
+        """
+        if sorters:
+            sort_terms = []
+            for s in sorters:
+                key, dir = s.split(":", maxsplit=1)
+                if dir not in cls.DIRECTIONS:
+                    raise HTTPException(
+                        status.HTTP_400_BAD_REQUEST,
+                        f"Sort direction {dir!r} must be one of {','.join(cls.DIRECTIONS)}",
+                    )
+                if key not in cls.FIELDS:
+                    raise HTTPException(
+                        status.HTTP_400_BAD_REQUEST,
+                        f"Sort key {key!r} must be one of {','.join(cls.FIELDS)}",
+                    )
+                sort_terms.append({f"run.{key}": {"order": dir}})
+        else:
+            sort_terms = [{"run.begin": {"order": "asc"}}]
+        return sort_terms
+
+    async def _search(
+        self, index: str, query: Optional[dict[str, Any]] = None, **kwargs
+    ) -> dict[str, Any]:
+        """Issue an OpenSearch query
+
+        Args:
+            index: The "base" CDM index name, e.g., "run", "metric_desc"
+            query: An OpenSearch query object
+            kwargs: Additional OpenSearch parameters
+
+        Returns:
+            The OpenSearch response payload (JSON dict)
+        """
+        idx = self._get_index(index)
+        start = time.time()
+        value = await self.elastic.search(index=idx, body=query, **kwargs)
+        self.logger.info(
+            "QUERY on %s took %.3f seconds, hits: %s",
+            idx,
+            time.time() - start,
+            value.get("hits", {}).get("total"),
+        )
+        return value
+
+    async def close(self):
+        """Close the OpenSearch connection"""
+        if self.elastic:
+            await self.elastic.close()
+        self.elastic = None
+
+    async def search(
+        self,
+        index: str,
+        filters: Optional[list[dict[str, Any]]] = None,
+        aggregations: Optional[dict[str, Any]] = None,
+        sort: Optional[list[dict[str, str]]] = None,
+        source: Optional[str] = None,
+        size: Optional[int] = None,
+        offset: Optional[int] = None,
+        **kwargs,
+    ) -> dict[str, Any]:
+        """OpenSearch query helper
+
+        Combine index, filters, aggregations, sort, and pagination options
+        into an OpenSearch query.
+
+        Args:
+            index: "root" CDM index name ("run", "metric_desc", ...)
+            filters: list of JSON dict filter terms {"term": {"name": "value"}}
+            aggregations: JSON dict of aggregations {"name": {"terms": ...}}
+            sort: list of JSON dict sort terms {"name": "asc"}
+            size: The number of hits to return; defaults to "very large"
+            offset: The number of hits to skip, for pagination
+            kwargs: Additional OpenSearch options
+
+        Returns:
+            The OpenSearch response
+        """
+        f = filters if filters else []
+        query = {
+            "size": self.BIGQUERY if size is None else size,
+            "query": {"bool": {"filter": f}},
+        }
+        if sort:
+            query.update({"sort": sort})
+        if source:
+            query.update({"_source": source})
+        if offset:
+            query.update({"from": offset})
+        if aggregations:
+            query.update({"aggs": aggregations})
+        return await self._search(index, query, **kwargs)
+
+    async def _get_metric_ids(
+        self,
+        run: str,
+        metric: str,
+        namelist: Optional[list[str]] = None,
+        periodlist: Optional[list[str]] = None,
+        aggregate: bool = False,
+    ) -> list[str]:
+        """Generate a list of matching metric_desc IDs
+
+        Given a specific run and metric name, and a set of breakout filters,
+        returns a list of metric desc IDs that match.
+
+        If a single ID is required to produce a consistent metric, and the
+        supplied filters produce more than one without aggregation, raise a
+        422 HTTP error (UNPROCESSABLE CONTENT) with a response body showing
+        the unsatisfied breakouts (name and available values).
+
+        TODO: Instead of either single metric or aggregation across multiple
+        metrics, we should support "breakouts", which would individually
+        process (graph, summarize, or list) data for each "loose" breakout
+        name. E.g., Busy-CPU might list per-core, or per-processor mode.
+
+        Args:
+            run: run ID
+            metric: combined metric name (e.g., sar-net::packets-sec)
+            namelist: a list of breakout filters like "type=physical"
+            periodlist: a list of period IDs
+            aggregate: if True, allow multiple metric IDs
+
+        Returns:
+            A list of matching metric_desc ID value(s)
+        """
+        filters = self._build_metric_filters(run, metric, namelist, periodlist)
+        metrics = await self.search(
+            "metric_desc",
+            filters=filters,
+            ignore_unavailable=True,
+        )
+        if len(metrics["hits"]["hits"]) < 1:
+            raise HTTPException(
+                status.HTTP_400_BAD_REQUEST,
+                (
+                    f"No matches for {metric}"
+                    f"{('+' + ','.join(namelist) if namelist else '')}"
+                ),
+            )
+        ids = [h["metric_desc"]["id"] for h in self._hits(metrics)]
+        if len(ids) < 2 or aggregate:
+            return ids
+
+        # If we get here, the client asked for breakout data that doesn't
+        # resolve to a single metric stream, and didn't specify aggregation.
+        # Offer some help.
+        names = defaultdict(set)
+        periods = set()
+        response = {
+            "message": f"More than one metric ({len(ids)}) means "
+            "you should add breakout filters or aggregate."
+        }
+        for m in self._hits(metrics):
+            if "period" in m:
+                periods.add(m["period"]["id"])
+            for n, v in m["metric_desc"]["names"].items():
+                names[n].add(v)
+
+        # We want to help filter a consistent summary, so only show those
+        # breakout names with more than one value.
+        response["names"] = {n: sorted(v) for n, v in names.items() if v and len(v) > 1}
+        response["periods"] = sorted(periods)
+        raise HTTPException(
+            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=response
+        )
+
+    async def _build_timestamp_range_filters(
+        self, periods: Optional[list[str]] = None
+    ) -> list[dict[str, Any]]:
+        """Create a timestamp range filter
+
+        This extracts the begin and end timestamps from the list of periods and
+        builds a timestamp filter range to select documents on or after the
+        earliest begin timestamp and on or before the latest end timestamp.
+
+        Args:
+            periods: a list of CDM period IDs
+
+        Returns:
+            Constructs a range filter for the earliest begin timestamp and the
+            latest end timestamp among the specified periods.
+        """
+        if periods:
+            ps = self._split_list(periods)
+            matches = await self.search(
+                "period", filters=[{"terms": {"period.id": ps}}]
+            )
+            start = None
+            end = None
+            name = ""
+            for h in self._hits(matches):
+                p = h["period"]
+                st = p.get("begin")
+                et = p.get("end")
+                if not st or not et:
+                    name = (
+                        f"run {self._get(h, ['run', 'benchmark'])}:"
+                        f"{self._get(h, ['run', 'begin'])},"
+                        f"iteration {self._get(h, ['iteration', 'num'])},"
+                        f"sample {self._get(h, ['sample', 'num'])}"
+                    )
+                if st and (not start or st < start):
+                    start = st
+                if et and (not end or et > end):
+                    end = et
+            if start is None or end is None:
+                raise HTTPException(
+                    status.HTTP_422_UNPROCESSABLE_ENTITY,
+                    f"Unable to compute {name!r} time range: the run is missing period timestamps",
+                )
+            return [
+                {"range": {"metric_data.begin": {"gte": str(start)}}},
+                {"range": {"metric_data.end": {"lte": str(end)}}},
+            ]
+        else:
+            return []
+
+    async def _get_run_ids(
+        self, index: str, filters: Optional[list[dict[str, Any]]] = None
+    ) -> set[str]:
+        """Return a set of run IDs matching a filter
+
+        Documents in the specified index must have "run.id" fields. Returns
+        a set of unique run IDs matched by the filter in the specified index.
+
+        Args:
+            index: root CDM index name
+            filters: a list of OpenSearch filter terms
+
+        Returns:
+            a set of unique run ID values
+        """
+        filtered = await self.search(
+            index, source="run.id", filters=filters, ignore_unavailable=True
+        )
+        self.logger.debug("HITS: %s", filtered["hits"]["hits"])
+        return set([x for x in self._hits(filtered, ["run", "id"])])
+
+    async def get_run_filters(self) -> dict[str, dict[str, list[str]]]:
+        """Return possible tag and filter terms
+
+        Return a description of tag and param filter terms meaningful
+        across all datasets. TODO: we should support date-range and benchmark
+        filtering. Consider supporting all `run` API filtering, which would
+        allow adjusting the filter popups to drop options no longer relevant
+        to a given set.
+
+            {
+                "param": {
+                    "gpus": ["4", "8"]
+                }
+            }
+
+        Returns:
+            A two-level JSON dict; the first level is the namespace (param or
+            tag), the second level key is the param/tag/field name and its value
+            is the set of values defined for that key.
+ """ + tags = await self.search( + "tag", + size=0, + aggregations={ + "key": { + "terms": {"field": "tag.name", "size": self.BIGQUERY}, + "aggs": { + "values": {"terms": {"field": "tag.val", "size": self.BIGQUERY}} + }, + } + }, + ignore_unavailable=True, + ) + params = await self.search( + "param", + size=0, + aggregations={ + "key": { + "terms": {"field": "param.arg", "size": self.BIGQUERY}, + "aggs": { + "values": { + "terms": {"field": "param.val", "size": self.BIGQUERY} + } + }, + } + }, + ignore_unavailable=True, + ) + aggs = { + k: {"terms": {"field": f"run.{k}", "size": self.BIGQUERY}} + for k in self.RUN_FILTERS + } + runs = await self.search( + "run", + size=0, + aggregations=aggs, + ) + result = defaultdict(lambda: defaultdict(lambda: set())) + for p in self._aggs(params, "key"): + for v in p["values"]["buckets"]: + result["param"][p["key"]].add(v["key"]) + for t in self._aggs(tags, "key"): + for v in t["values"]["buckets"]: + result["tag"][t["key"]].add(v["key"]) + for name in self.RUN_FILTERS: + for f in self._aggs(runs, name): + result["run"][name].add(f["key"]) + return {s: {k: list(v) for k, v in keys.items()} for s, keys in result.items()} + + async def get_runs( + self, + filter: Optional[list[str]] = None, + start: Optional[Union[int, str, datetime]] = None, + end: Optional[Union[int, str, datetime]] = None, + offset: int = 0, + sort: Optional[list[str]] = None, + size: Optional[int] = None, + **kwargs, + ) -> dict[str, Any]: + """Return matching Crucible runs + + Filtered and sorted list of runs. + + { + "sort": [], + "startDate": "2024-01-01T05:00:00+00:00", + "size": 1, + "offset": 0, + "results": [ + { + "begin": "1722878906342", + "benchmark": "ilab", + "email": "A@email", + "end": "1722880503544", + "id": "4e1d2c3c-b01c-4007-a92d-23a561af2c11", + "name": "\"A User\"", + "source": "node.example.com//var/lib/crucible/run/ilab--2024-08-05_17:17:13_UTC--4e1d2c3c-b01c-4007-a92d-23a561af2c11", + "tags": { + "topology": "none" + }, + "iterations": [ + { + "iteration": 1, + "primary_metric": "ilab::train-samples-sec", + "primary_period": "measurement", + "status": "pass", + "params": { + "cpu-offload-pin-memory": "1", + "model": "/home/models/granite-7b-lab/", + "data-path": "/home/data/training/knowledge_data.jsonl", + "cpu-offload-optimizer": "1", + "nnodes": "1", + "nproc-per-node": "4", + "num-runavg-samples": "2" + } + } + ], + "primary_metrics": [ + "ilab::train-samples-sec" + ], + "status": "pass", + "params": { + "cpu-offload-pin-memory": "1", + "model": "/home/models/granite-7b-lab/", + "data-path": "/home/data/training/knowledge_data.jsonl", + "cpu-offload-optimizer": "1", + "nnodes": "1", + "nproc-per-node": "4", + "num-runavg-samples": "2" + }, + "begin_date": "2024-08-05 17:28:26.342000+00:00", + "end_date": "2024-08-05 17:55:03.544000+00:00" + } + ], + "count": 1, + "total": 15, + "next_offset": 1 + } + + Args: + start: Include runs starting at timestamp + end: Include runs ending no later than timestamp + filter: List of tag/param filter terms (parm:key=value) + sort: List of sort terms (column:) + size: Include up to runs in output + offset: Use size/from pagination instead of search_after + + Returns: + JSON object with "results" list and "housekeeping" fields + """ + + # We need to remove runs which don't match against 'tag' or 'param' + # filter terms. The CDM schema doesn't make it possible to do this in + # one shot. 
+        # Instead, we run queries against the param and tag indices
+        # separately, producing lists of matching run IDs that we'll use to
+        # filter the final collection.
+        #
+        # If there are no matches, we can exit early. (TODO: should this be an
+        # error, or just a success with an empty list?)
+        results = {}
+        filters = []
+        sorters = self._split_list(sort)
+        results["sort"] = sorters
+        sort_terms = self._build_sort_terms(sorters)
+        param_filters, tag_filters, run_filters = self._build_filter_options(filter)
+        if run_filters:
+            filters.extend(run_filters)
+        if start or end:
+            s = None
+            e = None
+            if start:
+                s = self._normalize_date(start)
+                results["startDate"] = datetime.fromtimestamp(
+                    s / 1000.0, tz=timezone.utc
+                ).isoformat()
+            if end:
+                e = self._normalize_date(end)
+                results["endDate"] = datetime.fromtimestamp(
+                    e / 1000.0, tz=timezone.utc
+                ).isoformat()
+
+            if s and e and s > e:
+                raise HTTPException(
+                    status_code=422,
+                    detail={
+                        "error": "Invalid date range: start_date must be less than end_date"
+                    },
+                )
+            cond = {}
+            if s:
+                cond["gte"] = str(s)
+            if e:
+                cond["lte"] = str(e)
+            filters.append({"range": {"run.begin": cond}})
+        if size:
+            results["size"] = size
+        results["offset"] = offset if offset is not None else 0
+
+        # In order to filter by param or tag values, we need to produce a list
+        # of matching RUN IDs from each index. We'll then drop any RUN ID that's
+        # not on both lists.
+        if tag_filters:
+            tagids = await self._get_run_ids("tag", tag_filters)
+        if param_filters:
+            paramids = await self._get_run_ids("param", param_filters)
+
+        # If it's obvious we can't produce any matches at this point, exit.
+        if (tag_filters and len(tagids) == 0) or (param_filters and len(paramids) == 0):
+            results.update({"results": [], "count": 0, "total": 0})
+            return results
+
+        hits = await self.search(
+            "run",
+            size=size,
+            offset=offset,
+            sort=sort_terms,
+            filters=filters,
+            **kwargs,
+            ignore_unavailable=True,
+        )
+        rawiterations = await self.search("iteration", ignore_unavailable=True)
+        rawtags = await self.search("tag", ignore_unavailable=True)
+        rawparams = await self.search("param", ignore_unavailable=True)
+
+        iterations = defaultdict(list)
+        tags = defaultdict(defaultdict)
+        params = defaultdict(defaultdict)
+        run_params = defaultdict(list)
+
+        for i in self._hits(rawiterations):
+            iterations[i["run"]["id"]].append(i["iteration"])
+
+        # Organize tags by run ID
+        for t in self._hits(rawtags):
+            tags[t["run"]["id"]][t["tag"]["name"]] = t["tag"]["val"]
+
+        # Organize params by iteration ID
+        for p in self._hits(rawparams):
+            run_params[p["run"]["id"]].append(p)
+            params[p["iteration"]["id"]][p["param"]["arg"]] = p["param"]["val"]
+
+        runs = {}
+        for h in self._hits(hits):
+            run = h["run"]
+            rid = run["id"]
+
+            # Filter the runs by our tag and param queries
+            if param_filters and rid not in paramids:
+                continue
+
+            if tag_filters and rid not in tagids:
+                continue
+
+            # Collect unique runs: the status is "fail" if any iteration for
+            # that run ID failed.
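+            # E.g. (hypothetically), iteration statuses ["pass", "fail", "pass"]
+            # leave the run status as "fail".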
+ runs[rid] = run + run["tags"] = tags.get(rid, {}) + run["iterations"] = [] + run["primary_metrics"] = set() + common = CommonParams() + for i in iterations.get(rid, []): + iparams = params.get(i["id"], {}) + if "status" not in run: + run["status"] = i["status"] + else: + if i["status"] != "pass": + run["status"] = i["status"] + common.add(iparams) + run["primary_metrics"].add(i["primary-metric"]) + run["iterations"].append( + { + "iteration": i["num"], + "primary_metric": i["primary-metric"], + "primary_period": i["primary-period"], + "status": i["status"], + "params": iparams, + } + ) + run["iterations"].sort(key=lambda i: i["iteration"]) + run["params"] = common.render() + try: + run["begin_date"] = self._format_timestamp(run["begin"]) + run["end_date"] = self._format_timestamp(run["end"]) + except KeyError as e: + self.logger.warning("Missing 'run' key %r in %s", str(e), run) + run["begin_date"] = self._format_timestamp("0") + run["end_date"] = self._format_timestamp("0") + + count = len(runs) + total = hits["hits"]["total"]["value"] + results.update( + { + "results": list(runs.values()), + "count": count, + "total": total, + } + ) + if size and (offset + count < total): + results["next_offset"] = offset + size + return results + + async def get_tags(self, run: str, **kwargs) -> dict[str, str]: + """Return the set of tags associated with a run + + Args: + run: run ID + + Returns: + JSON dict with "tag" keys showing each value + """ + tags = await self.search( + index="tag", + filters=[{"term": {"run.id": run}}], + **kwargs, + ignore_unavailable=True, + ) + return {t["name"]: t["val"] for t in self._hits(tags, ["tag"])} + + async def get_params( + self, run: Optional[str] = None, iteration: Optional[str] = None, **kwargs + ) -> dict[str, dict[str, str]]: + """Return the set of parameters for a run or iteration + + Parameters are technically associated with an iteration, but can be + aggregated for a run. This will return a set of parameters for each + iteration; plus, if a "run" was specified, a filtered list of param + values that are common across all iterations. + + Args: + run: run ID + iteration: iteration ID + kwargs: additional OpenSearch keywords + + Returns: + JSON dict of param values by iteration (plus "common" if by run ID) + """ + if not run and not iteration: + raise HTTPException( + status.HTTP_400_BAD_REQUEST, + "A params query requires either a run or iteration ID", + ) + match = {"run.id" if run else "iteration.id": run if run else iteration} + params = await self.search( + index="param", + filters=[{"term": match}], + **kwargs, + ignore_unavailable=True, + ) + response = defaultdict(defaultdict) + for param in self._hits(params): + iter = param["iteration"]["id"] + arg = param["param"]["arg"] + val = param["param"]["val"] + old = self._get(response, [iter, arg]) + if old: + self.logger.warning( + "Duplicate param %s for iteration %s (%r, %r)", arg, iter, old, val + ) + response[iter][arg] = val + + # Filter out all parameter values that don't exist in all or which have + # different values. 
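+        # E.g. (hypothetical values), iterations with params {"model": "a",
+        # "lr": "1"} and {"model": "a", "lr": "2"} share only {"model": "a"},
+        # which becomes the "common" value set.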
+        if run:
+            common = CommonParams()
+            for params in response.values():
+                common.add(params)
+            response["common"] = common.render()
+        return response
+
+    async def get_iterations(self, run: str, **kwargs) -> list[dict[str, Any]]:
+        """Return a list of iterations for a run
+
+        Args:
+            run: run ID
+            kwargs: additional OpenSearch keywords
+
+        Returns:
+            A list of iteration documents
+        """
+        iterations = await self.search(
+            index="iteration",
+            filters=[{"term": {"run.id": run}}],
+            sort=[{"iteration.num": "asc"}],
+            **kwargs,
+            ignore_unavailable=True,
+        )
+        return [i["iteration"] for i in self._hits(iterations)]
+
+    async def get_samples(
+        self, run: Optional[str] = None, iteration: Optional[str] = None, **kwargs
+    ):
+        """Return a list of samples for a run or iteration
+
+        Args:
+            run: run ID
+            iteration: iteration ID
+            kwargs: additional OpenSearch keywords
+
+        Returns:
+            A list of sample documents.
+        """
+        if not run and not iteration:
+            raise HTTPException(
+                status.HTTP_400_BAD_REQUEST,
+                "A sample query requires either a run or iteration ID",
+            )
+        match = {"run.id" if run else "iteration.id": run if run else iteration}
+        hits = await self.search(
+            index="sample",
+            filters=[{"term": match}],
+            **kwargs,
+            ignore_unavailable=True,
+        )
+        samples = []
+        for s in self._hits(hits):
+            sample = s["sample"]
+            sample["iteration"] = s["iteration"]["num"]
+            sample["primary_metric"] = s["iteration"]["primary-metric"]
+            samples.append(sample)
+        return samples
+
+    async def get_periods(
+        self,
+        run: Optional[str] = None,
+        iteration: Optional[str] = None,
+        sample: Optional[str] = None,
+        **kwargs,
+    ):
+        """Return a list of periods associated with a run, an iteration, or a
+        sample
+
+        The "period" document is normalized to represent timestamps using ISO
+        strings.
+
+        Args:
+            run: run ID
+            iteration: iteration ID
+            sample: sample ID
+            kwargs: additional OpenSearch parameters
+
+        Returns:
+            a list of normalized period documents
+        """
+        if not any((run, iteration, sample)):
+            raise HTTPException(
+                status.HTTP_400_BAD_REQUEST,
+                "A period query requires a run, iteration, or sample ID",
+            )
+        match = None
+        if sample:
+            match = {"sample.id": sample}
+        elif iteration:
+            match = {"iteration.id": iteration}
+        else:
+            match = {"run.id": run}
+        periods = await self.search(
+            index="period",
+            filters=[{"term": match}],
+            sort=[{"period.begin": "asc"}],
+            **kwargs,
+            ignore_unavailable=True,
+        )
+        body = []
+        for h in self._hits(periods):
+            period = self._format_period(period=h["period"])
+            period["iteration"] = h["iteration"]["num"]
+            period["sample"] = h["sample"]["num"]
+            period["primary_metric"] = h["iteration"]["primary-metric"]
+            period["status"] = h["iteration"]["status"]
+            body.append(period)
+        return body
+
+    async def get_metrics_list(self, run: str, **kwargs) -> dict[str, Any]:
+        """Return a list of metrics available for a run
+
+        Each run may have multiple performance metrics stored. This API allows
+        retrieving a sorted list of the metrics available for a given run, with
+        the "names" selection criteria available for each and, for "periodic"
+        (benchmark) metrics, the defined periods for which data was gathered.
+
+        {
+            "ilab::train-samples-sec": {
+                "periods": [{"id": , "name": "measurement"}],
+                "breakouts": {"benchmark-group": ["unknown"], ...}
+            },
+            "iostat::avg-queue-length": {
+                "periods": [],
+                "breakouts": {"benchmark-group": ["unknown"], ...},
+            },
+            ...
+ } + + Args: + run: run ID + + Returns: + List of metrics available for the run + """ + hits = await self.search( + index="metric_desc", + filters=[{"term": {"run.id": run}}], + ignore_unavailable=True, + **kwargs, + ) + met = {} + for h in self._hits(hits): + desc = h["metric_desc"] + name = desc["source"] + "::" + desc["type"] + if name in met: + record = met[name] + else: + record = {"periods": [], "breakouts": defaultdict(list)} + met[name] = record + if "period" in h: + record["periods"].append(h["period"]["id"]) + for n, v in desc["names"].items(): + # mimic a set, since the set type doesn't serialize + if v not in record["breakouts"][n]: + record["breakouts"][n].append(v) + return met + + async def get_metric_breakouts( + self, + run: str, + metric: str, + names: Optional[list[str]] = None, + periods: Optional[list[str]] = None, + ) -> dict[str, Any]: + """Help explore available metric breakouts + + Args: + run: run ID + metric: metric label (e.g., "mpstat::Busy-CPU") + names: list of name filters ("cpu=3") + periods: list of period IDs + + Returns: + A description of all breakout names and values, which can be + specified to narrow down metrics returns by the data, summary, and + graph APIs. + + { + "label": "mpstat::Busy-CPU", + "class": [ + "throughput" + ], + "type": "Busy-CPU", + "source": "mpstat", + "breakouts": { + "num": [ + "8", + "72" + ], + "thread": [ + 0, + 1 + ] + } + } + """ + start = time.time() + filters = self._build_metric_filters(run, metric, names, periods) + metric_name = metric + ("" if not names else ("+" + ",".join(names))) + metrics = await self.search( + "metric_desc", + filters=filters, + ignore_unavailable=True, + ) + if len(metrics["hits"]["hits"]) < 1: + raise HTTPException( + status.HTTP_400_BAD_REQUEST, + f"Metric name {metric_name} not found for run {run}", + ) + classes = set() + response = {"label": metric} + breakouts = defaultdict(list) + pl = set() + for m in self._hits(metrics): + desc = m["metric_desc"] + response["type"] = desc["type"] + response["source"] = desc["source"] + if desc.get("class"): + classes.add(desc["class"]) + if "period" in m: + pl.add(m["period"]["id"]) + for n, v in desc["names"].items(): + if v not in breakouts[n]: + breakouts[n].append(v) + # We want to help filter a consistent summary, so only show those + # names with more than one value. + if len(pl) > 1: + response["periods"] = sorted(pl) + response["class"] = sorted(classes) + response["breakouts"] = {n: v for n, v in breakouts.items() if len(v) > 1} + self.logger.info("Processing took %.3f seconds", time.time() - start) + return response + + async def get_metrics_data( + self, + run: str, + metric: str, + names: Optional[list[str]] = None, + periods: Optional[list[str]] = None, + aggregate: bool = False, + ) -> list[Any]: + """Return a list of metric data + + The "aggregate" option allows aggregating various metrics across + breakout streams and periods: be careful, as this is meaningful only if + the breakout streams are sufficiently related. + + Args: + run: run ID + metric: metric label (e.g., "mpstat::Busy-CPU") + names: list of name filters ("cpu=3") + periods: list of period IDs + aggregate: aggregate multiple metric data streams + + Returns: + A sequence of data samples, showing the aggregate sample along with + the duration and end timestamp of each sample interval. 
+ + [ + { + "begin": "2024-08-22 20:03:23.028000+00:00", + "end": "2024-08-22 20:03:37.127000+00:00", + "duration": 14.1, + "value": 9.35271216694379 + }, + { + "begin": "2024-08-22 20:03:37.128000+00:00", + "end": "2024-08-22 20:03:51.149000+00:00", + "duration": 14.022, + "value": 9.405932330557683 + }, + { + "begin": "2024-08-22 20:03:51.150000+00:00", + "end": "2024-08-22 20:04:05.071000+00:00", + "duration": 13.922, + "value": 9.478773265522682 + } + ] + """ + start = time.time() + ids = await self._get_metric_ids( + run, metric, names, periodlist=periods, aggregate=aggregate + ) + + # If we're searching by periods, filter metric data by the period + # timestamp range rather than just relying on the metric desc IDs as + # we also want to filter non-periodic tool data. + filters = [{"terms": {"metric_desc.id": ids}}] + filters.extend(await self._build_timestamp_range_filters(periods)) + + response = [] + + # NOTE -- _get_metric_ids already failed if we found multiple IDs but + # aggregation wasn't specified. + if len(ids) > 1: + # Find the minimum sample interval of the selected metrics + aggdur = await self.search( + "metric_data", + size=0, + filters=filters, + aggregations={"duration": {"stats": {"field": "metric_data.duration"}}}, + ) + if aggdur["aggregations"]["duration"]["count"] > 0: + interval = int(aggdur["aggregations"]["duration"]["min"]) + data = await self.search( + index="metric_data", + size=0, + filters=filters, + aggregations={ + "interval": { + "histogram": { + "field": "metric_data.end", + "interval": interval, + }, + "aggs": {"value": {"sum": {"field": "metric_data.value"}}}, + } + }, + ) + for h in self._aggs(data, "interval"): + response.append( + { + "begin": self._format_timestamp(h["key"] - interval), + "end": self._format_timestamp(h["key"]), + "value": h["value"]["value"], + "duration": interval / 1000.0, + } + ) + else: + data = await self.search("metric_data", filters=filters) + for h in self._hits(data, ["metric_data"]): + response.append(self._format_data(h)) + response.sort(key=lambda a: a["end"]) + self.logger.info("Processing took %.3f seconds", time.time() - start) + return response + + async def get_metrics_summary( + self, + run: str, + metric: str, + names: Optional[list[str]] = None, + periods: Optional[list[str]] = None, + ) -> dict[str, Any]: + """Return a statistical summary of metric data + + Provides a statistical summary of selected data samples. 
+
+        Args:
+            run: run ID
+            metric: metric label (e.g., "mpstat::Busy-CPU")
+            names: list of name filters ("cpu=3")
+            periods: list of period IDs
+
+        Returns:
+            A statistical summary of the selected metric data
+
+            {
+                "count": 71,
+                "min": 0.0,
+                "max": 0.3296,
+                "avg": 0.02360704225352113,
+                "sum": 1.67626214400000001
+            }
+        """
+        start = time.time()
+        ids = await self._get_metric_ids(run, metric, names, periodlist=periods)
+        filters = [{"terms": {"metric_desc.id": ids}}]
+        filters.extend(await self._build_timestamp_range_filters(periods))
+        data = await self.search(
+            "metric_data",
+            size=0,
+            filters=filters,
+            aggregations={"score": {"stats": {"field": "metric_data.value"}}},
+        )
+        self.logger.info("Processing took %.3f seconds", time.time() - start)
+        return data["aggregations"]["score"]
+
+    async def _graph_title(
+        self,
+        run_id: str,
+        run_id_list: list[str],
+        graph: Graph,
+        params_by_run: dict[str, Any],
+        periods_by_run: dict[str, Any],
+    ) -> str:
+        """Compute a default title for a graph
+
+        Use the period, breakout name selections, run list, and iteration
+        parameters to construct a meaningful name for a graph.
+
+        For example, "ilab::sdg-samples-sec (batch-size=4) {run 1}", or
+        "mpstat::Busy-CPU [cpu=4]"
+
+        Args:
+            run_id: the Crucible run ID
+            run_id_list: ordered list of run IDs in our list of graphs
+            graph: the current Graph object
+            params_by_run: initially empty dict used to cache parameters
+            periods_by_run: initially empty dict used to cache periods
+
+        Returns:
+            A string title
+        """
+        names = graph.names
+        metric = graph.metric
+        if run_id not in params_by_run:
+            # Gather iteration parameters outside the loop for help in
+            # generating useful labels.
+            all_params = await self.search(
+                "param", filters=[{"term": {"run.id": run_id}}]
+            )
+            collector = defaultdict(defaultdict)
+            for h in self._hits(all_params):
+                collector[h["iteration"]["id"]][h["param"]["arg"]] = h["param"]["val"]
+            params_by_run[run_id] = collector
+        else:
+            collector = params_by_run[run_id]
+
+        if run_id not in periods_by_run:
+            periods = await self.search(
+                "period", filters=[{"term": {"run.id": run_id}}]
+            )
+            iteration_periods = defaultdict(set)
+            for p in self._hits(periods):
+                iteration_periods[p["iteration"]["id"]].add(p["period"]["id"])
+            periods_by_run[run_id] = iteration_periods
+        else:
+            iteration_periods = periods_by_run[run_id]
+
+        # We can easily end up with multiple graphs across distinct
+        # periods or iterations, so we want to be able to provide some
+        # labeling to the graphs. We do this by looking for unique
+        # iteration parameter values, since the iteration number and
+        # period name aren't useful by themselves.
+        name_suffix = ""
+        if graph.periods:
+            iteration = None
+            for i, pset in iteration_periods.items():
+                if set(graph.periods) <= pset:
+                    iteration = i
+                    break
+
+            # If the period(s) we're graphing resolve to a single
+            # iteration in a run with multiple iterations, then we can
+            # try to find a unique title suffix based on distinct param
+            # values for that iteration.
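+            # For example (hypothetical values), if this iteration alone has
+            # batch-size=4, the title gains the suffix " (batch-size=4)".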
+            if iteration and len(collector) > 1:
+                unique = collector[iteration].copy()
+                for i, params in collector.items():
+                    if i != iteration:
+                        for p in list(unique.keys()):
+                            if p in params and unique[p] == params[p]:
+                                del unique[p]
+                if unique:
+                    name_suffix = (
+                        " (" + ",".join([f"{p}={v}" for p, v in unique.items()]) + ")"
+                    )
+
+        if len(run_id_list) > 1:
+            name_suffix += f" {{run {run_id_list.index(run_id) + 1}}}"
+
+        options = (" [" + ",".join(names) + "]") if names else ""
+        return metric + options + name_suffix
+
+    async def get_metrics_graph(self, graphdata: GraphList) -> dict[str, Any]:
+        """Return metrics data for a run
+
+        Each run may have multiple performance metrics stored. This API allows
+        retrieving a graphable time-series representation of a metric over the
+        period of the run, in the format defined by Plotly as configuration
+        settings plus an x value array and a y value array.
+
+        {
+            "data": [
+                {
+                    "x": [
+                        "2024-08-27 09:16:27.371000",
+                        ...
+                    ],
+                    "y": [
+                        10.23444312132161,
+                        ...
+                    ],
+                    "name": "Metric ilab::train-samples-sec",
+                    "type": "scatter",
+                    "mode": "line",
+                    "marker": {"color": "black"},
+                    "labels": {"x": "sample timestamp", "y": "samples / second"}
+                }
+            ],
+            "layout": {
+                "width": 1500,
+                "yaxis": {
+                    "title": "mpstat::Busy-CPU core=2,package=0,num=112,type=usr",
+                    "color": "black"
+                }
+            }
+        }
+
+        Args:
+            graphdata: A GraphList object
+
+        Returns:
+            A Plotly object with layout
+        """
+        start = time.time()
+        graphlist = []
+        default_run_id = graphdata.run
+        layout: dict[str, Any] = {"width": "1500"}
+        axes = {}
+        yaxis = None
+        cindex = 0
+        params_by_run = {}
+        periods_by_run = {}
+
+        # Construct a de-duped ordered list of run IDs, starting with the
+        # default.
+        run_id_list = []
+        if default_run_id:
+            run_id_list.append(default_run_id)
+        run_id_missing = False
+        for g in graphdata.graphs:
+            if g.run:
+                if g.run not in run_id_list:
+                    run_id_list.append(g.run)
+            else:
+                run_id_missing = True
+
+        if run_id_missing and not default_run_id:
+            raise HTTPException(
+                status.HTTP_400_BAD_REQUEST, "each graph request must have a run ID"
+            )
+
+        for g in graphdata.graphs:
+            run_id = g.run if g.run else default_run_id
+            names = g.names
+            metric: str = g.metric
+
+            # The caller can provide a title for each graph; but, if not, we
+            # journey down dark overgrown pathways to fabricate a default with
+            # reasonable context, including unique iteration parameters,
+            # breakdown selections, and which run provided the data.
+            if g.title:
+                title = g.title
+            else:
+                title = await self._graph_title(
+                    run_id, run_id_list, g, params_by_run, periods_by_run
+                )
+
+            ids = await self._get_metric_ids(
+                run_id,
+                metric,
+                names,
+                periodlist=g.periods,
+                aggregate=g.aggregate,
+            )
+            filters = [{"terms": {"metric_desc.id": ids}}]
+            filters.extend(await self._build_timestamp_range_filters(g.periods))
+            y_max = 0.0
+            points: list[Point] = []
+
+            # If we're pulling multiple breakouts, e.g., total CPU across modes
+            # or cores, we want to aggregate by timestamp interval. Sample
+            # timestamps don't necessarily align, so the "histogram" aggregation
+            # normalizes within the interval (based on the minimum actual
+            # interval duration).
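+            # Sketch of the shape this produces (hypothetical numbers): each
+            # histogram bucket returned below carries a "key" (the interval
+            # start timestamp in ms) and the summed metric value, e.g.
+            #     {"key": 1724357003028.0, "value": {"value": 9.35}}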
+            if len(ids) > 1:
+                # Find the minimum sample interval of the selected metrics
+                aggdur = await self.search(
+                    "metric_data",
+                    size=0,
+                    filters=filters,
+                    aggregations={
+                        "duration": {"stats": {"field": "metric_data.duration"}}
+                    },
+                )
+                if aggdur["aggregations"]["duration"]["count"] > 0:
+                    interval = int(aggdur["aggregations"]["duration"]["min"])
+                    data = await self.search(
+                        index="metric_data",
+                        size=0,
+                        filters=filters,
+                        aggregations={
+                            "interval": {
+                                "histogram": {
+                                    "field": "metric_data.begin",
+                                    "interval": interval,
+                                },
+                                "aggs": {
+                                    "value": {"sum": {"field": "metric_data.value"}}
+                                },
+                            }
+                        },
+                    )
+                    for h in self._aggs(data, "interval"):
+                        begin = int(h["key"])
+                        end = begin + interval - 1
+                        points.append(Point(begin, end, float(h["value"]["value"])))
+            else:
+                data = await self.search("metric_data", filters=filters)
+                for h in self._hits(data, ["metric_data"]):
+                    points.append(
+                        Point(int(h["begin"]), int(h["end"]), float(h["value"]))
+                    )
+
+            # Sort the graph points by timestamp so that Plotly will draw nice
+            # lines. We graph both the "begin" and "end" timestamp of each
+            # sample against the value to more clearly show the sampling
+            # interval.
+            x = []
+            y = []
+
+            first = None
+
+            for p in sorted(points, key=lambda a: a.begin):
+                if graphdata.relative:
+                    if not first:
+                        first = p.begin
+                    s = (p.begin - first) / 1000.0
+                    e = (p.end - first) / 1000.0
+                    x.extend([s, e])
+                else:
+                    x.extend(
+                        [self._format_timestamp(p.begin), self._format_timestamp(p.end)]
+                    )
+                y.extend([p.value, p.value])
+                y_max = max(y_max, p.value)
+
+            if g.color:
+                color = g.color
+            else:
+                color = COLOR_NAMES[cindex]
+                cindex += 1
+                if cindex >= len(COLOR_NAMES):
+                    cindex = 0
+            graphitem = {
+                "x": x,
+                "y": y,
+                "name": title,
+                "type": "scatter",
+                "mode": "line",
+                "marker": {"color": color},
+                "labels": {
+                    "x": "sample timestamp",
+                    "y": "samples / second",
+                },
+            }
+
+            # Y-axis scaling and labeling is divided by benchmark label;
+            # so store each axis we create for reuse. (E.g., if we graph
+            # 5 different mpstat::Busy-CPU periods, they'll share a single
+            # Y axis.)
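+            # For example (hypothetical), graphing two distinct metrics yields
+            # layout keys "yaxis" and "yaxis2", with the graph items referencing
+            # yref "y" and "y2" respectively.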
+ if metric in axes: + yref = axes[metric] + else: + if yaxis: + name = f"yaxis{yaxis}" + yref = f"y{yaxis}" + yaxis += 1 + layout[name] = { + "title": metric, + "color": color, + "autorange": True, + "anchor": "free", + "autoshift": True, + "overlaying": "y", + } + else: + name = "yaxis" + yref = "y" + yaxis = 2 + layout[name] = { + "title": metric, + "color": color, + } + axes[metric] = yref + graphitem["yaxis"] = yref + graphlist.append(graphitem) + self.logger.info("Processing took %.3f seconds", time.time() - start) + return {"data": graphlist, "layout": layout} diff --git a/backend/poetry.lock b/backend/poetry.lock index 3cb69987..9223e756 100644 --- a/backend/poetry.lock +++ b/backend/poetry.lock @@ -140,55 +140,26 @@ files = [ [[package]] name = "anyio" -version = "3.7.1" +version = "4.8.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"}, - {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"}, + {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, + {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, ] [package.dependencies] -exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""} idna = ">=2.8" sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] -doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"] -test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (<0.22)"] - -[[package]] -name = "asgiref" -version = "3.8.1" -description = "ASGI specs, helper code, and adapters" -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "asgiref-3.8.1-py3-none-any.whl", hash = "sha256:3e1e3ecc849832fe52ccf2cb6686b7a55f82bb1d6aee72a58826471390335e47"}, - {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, -] - -[package.dependencies] -typing-extensions = {version = ">=4", markers = "python_version < \"3.11\""} - -[package.extras] -tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] - -[[package]] -name = "async-generator" -version = "1.10" -description = "Async generators and context managers for Python 3.5+" -optional = false -python-versions = ">=3.5" -groups = ["main"] -files = [ - {file = "async_generator-1.10-py3-none-any.whl", hash = "sha256:01c7bf666359b4967d2cda0000cc2e4af16a0ae098cbffcb8472fb9e8ad6585b"}, - {file = "async_generator-1.10.tar.gz", hash = "sha256:6ebb3d106c12920aaae42ccb6f787ef5eefdcdd166ea3d628fa8476abe712144"}, -] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "async-timeout" @@ 
-205,14 +176,14 @@ files = [ [[package]] name = "atlassian-python-api" -version = "3.41.18" +version = "3.41.19" description = "Python Atlassian REST API Wrapper" optional = false python-versions = "*" groups = ["main"] files = [ - {file = "atlassian_python_api-3.41.18-py3-none-any.whl", hash = "sha256:90e8b5fe649ace4967906552777203cf7d2d941688abb1da6b72c0f9a966038a"}, - {file = "atlassian_python_api-3.41.18.tar.gz", hash = "sha256:f38fa9c9c39fc072fa2124d2a8c9db3b684cb88797f428ddac19ae3c94d70bb3"}, + {file = "atlassian_python_api-3.41.19-py3-none-any.whl", hash = "sha256:056df6083c51f09597de8c56f7a4a1b8acec7a727a9ff156f72b2ef45fb0279c"}, + {file = "atlassian_python_api-3.41.19.tar.gz", hash = "sha256:694a81ed082a4ca8f4fa7a197d60ee2b3f34a45664a74bdfeb835c4d7ff0e305"}, ] [package.dependencies] @@ -229,14 +200,14 @@ kerberos = ["requests-kerberos"] [[package]] name = "attrs" -version = "24.3.0" +version = "25.1.0" description = "Classes Without Boilerplate" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, - {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, + {file = "attrs-25.1.0-py3-none-any.whl", hash = "sha256:c75a69e28a550a7e93789579c22aa26b0f5b83b75dc4e08fe092980051e1090a"}, + {file = "attrs-25.1.0.tar.gz", hash = "sha256:1c97078a80c814273a76b2a298a932eb681c87415c11dee0a6921de7f1b02c3e"}, ] [package.extras] @@ -269,6 +240,18 @@ charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] +[[package]] +name = "cachetools" +version = "5.5.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "cachetools-5.5.1-py3-none-any.whl", hash = "sha256:b76651fdc3b24ead3c648bbdeeb940c1b04d365b38b4af66788f9ec4a81d42bb"}, + {file = "cachetools-5.5.1.tar.gz", hash = "sha256:70f238fbba50383ef62e55c6aff6d9673175fe59f7c6782c7a0b9e38f4a9df95"}, +] + [[package]] name = "certifi" version = "2024.12.14" @@ -361,6 +344,18 @@ files = [ [package.dependencies] pycparser = "*" +[[package]] +name = "chardet" +version = "5.2.0" +description = "Universal encoding detector for Python 3" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"}, + {file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"}, +] + [[package]] name = "charset-normalizer" version = "3.4.1" @@ -485,12 +480,89 @@ description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["main"] -markers = "platform_system == \"Windows\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "coverage" +version = "7.6.10" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "coverage-7.6.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5c912978f7fbf47ef99cec50c4401340436d200d41d714c7a4766f377c5b7b78"}, + {file = "coverage-7.6.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a01ec4af7dfeb96ff0078ad9a48810bb0cc8abcb0115180c6013a6b26237626c"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3b204c11e2b2d883946fe1d97f89403aa1811df28ce0447439178cc7463448a"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32ee6d8491fcfc82652a37109f69dee9a830e9379166cb73c16d8dc5c2915165"}, + {file = "coverage-7.6.10-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675cefc4c06e3b4c876b85bfb7c59c5e2218167bbd4da5075cbe3b5790a28988"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f4f620668dbc6f5e909a0946a877310fb3d57aea8198bde792aae369ee1c23b5"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4eea95ef275de7abaef630c9b2c002ffbc01918b726a39f5a4353916ec72d2f3"}, + {file = "coverage-7.6.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e2f0280519e42b0a17550072861e0bc8a80a0870de260f9796157d3fca2733c5"}, + {file = "coverage-7.6.10-cp310-cp310-win32.whl", hash = "sha256:bc67deb76bc3717f22e765ab3e07ee9c7a5e26b9019ca19a3b063d9f4b874244"}, + {file = "coverage-7.6.10-cp310-cp310-win_amd64.whl", hash = "sha256:0f460286cb94036455e703c66988851d970fdfd8acc2a1122ab7f4f904e4029e"}, + {file = "coverage-7.6.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ea3c8f04b3e4af80e17bab607c386a830ffc2fb88a5484e1df756478cf70d1d3"}, + {file = "coverage-7.6.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:507a20fc863cae1d5720797761b42d2d87a04b3e5aeb682ef3b7332e90598f43"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37a84878285b903c0fe21ac8794c6dab58150e9359f1aaebbeddd6412d53132"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a534738b47b0de1995f85f582d983d94031dffb48ab86c95bdf88dc62212142f"}, + {file = "coverage-7.6.10-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d7a2bf79378d8fb8afaa994f91bfd8215134f8631d27eba3e0e2c13546ce994"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6713ba4b4ebc330f3def51df1d5d38fad60b66720948112f114968feb52d3f99"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ab32947f481f7e8c763fa2c92fd9f44eeb143e7610c4ca9ecd6a36adab4081bd"}, + {file = "coverage-7.6.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7bbd8c8f1b115b892e34ba66a097b915d3871db7ce0e6b9901f462ff3a975377"}, + {file = 
"coverage-7.6.10-cp311-cp311-win32.whl", hash = "sha256:299e91b274c5c9cdb64cbdf1b3e4a8fe538a7a86acdd08fae52301b28ba297f8"}, + {file = "coverage-7.6.10-cp311-cp311-win_amd64.whl", hash = "sha256:489a01f94aa581dbd961f306e37d75d4ba16104bbfa2b0edb21d29b73be83609"}, + {file = "coverage-7.6.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c6e64726b307782fa5cbe531e7647aee385a29b2107cd87ba7c0105a5d3853"}, + {file = "coverage-7.6.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c56e097019e72c373bae32d946ecf9858fda841e48d82df7e81c63ac25554078"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7827a5bc7bdb197b9e066cdf650b2887597ad124dd99777332776f7b7c7d0d0"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204a8238afe787323a8b47d8be4df89772d5c1e4651b9ffa808552bdf20e1d50"}, + {file = "coverage-7.6.10-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67926f51821b8e9deb6426ff3164870976fe414d033ad90ea75e7ed0c2e5022"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e78b270eadb5702938c3dbe9367f878249b5ef9a2fcc5360ac7bff694310d17b"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:714f942b9c15c3a7a5fe6876ce30af831c2ad4ce902410b7466b662358c852c0"}, + {file = "coverage-7.6.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:abb02e2f5a3187b2ac4cd46b8ced85a0858230b577ccb2c62c81482ca7d18852"}, + {file = "coverage-7.6.10-cp312-cp312-win32.whl", hash = "sha256:55b201b97286cf61f5e76063f9e2a1d8d2972fc2fcfd2c1272530172fd28c359"}, + {file = "coverage-7.6.10-cp312-cp312-win_amd64.whl", hash = "sha256:e4ae5ac5e0d1e4edfc9b4b57b4cbecd5bc266a6915c500f358817a8496739247"}, + {file = "coverage-7.6.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05fca8ba6a87aabdd2d30d0b6c838b50510b56cdcfc604d40760dae7153b73d9"}, + {file = "coverage-7.6.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9e80eba8801c386f72e0712a0453431259c45c3249f0009aff537a517b52942b"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a372c89c939d57abe09e08c0578c1d212e7a678135d53aa16eec4430adc5e690"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec22b5e7fe7a0fa8509181c4aac1db48f3dd4d3a566131b313d1efc102892c18"}, + {file = "coverage-7.6.10-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26bcf5c4df41cad1b19c84af71c22cbc9ea9a547fc973f1f2cc9a290002c8b3c"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e4630c26b6084c9b3cb53b15bd488f30ceb50b73c35c5ad7871b869cb7365fd"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2396e8116db77789f819d2bc8a7e200232b7a282c66e0ae2d2cd84581a89757e"}, + {file = "coverage-7.6.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:79109c70cc0882e4d2d002fe69a24aa504dec0cc17169b3c7f41a1d341a73694"}, + {file = "coverage-7.6.10-cp313-cp313-win32.whl", hash = "sha256:9e1747bab246d6ff2c4f28b4d186b205adced9f7bd9dc362051cc37c4a0c7bd6"}, + {file = "coverage-7.6.10-cp313-cp313-win_amd64.whl", hash = "sha256:254f1a3b1eef5f7ed23ef265eaa89c65c8c5b6b257327c149db1ca9d4a35f25e"}, + {file = "coverage-7.6.10-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:2ccf240eb719789cedbb9fd1338055de2761088202a9a0b73032857e53f612fe"}, + {file = "coverage-7.6.10-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0c807ca74d5a5e64427c8805de15b9ca140bba13572d6d74e262f46f50b13273"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bcfa46d7709b5a7ffe089075799b902020b62e7ee56ebaed2f4bdac04c508d8"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e0de1e902669dccbf80b0415fb6b43d27edca2fbd48c74da378923b05316098"}, + {file = "coverage-7.6.10-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7b444c42bbc533aaae6b5a2166fd1a797cdb5eb58ee51a92bee1eb94a1e1cb"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b330368cb99ef72fcd2dc3ed260adf67b31499584dc8a20225e85bfe6f6cfed0"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:9a7cfb50515f87f7ed30bc882f68812fd98bc2852957df69f3003d22a2aa0abf"}, + {file = "coverage-7.6.10-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f93531882a5f68c28090f901b1d135de61b56331bba82028489bc51bdd818d2"}, + {file = "coverage-7.6.10-cp313-cp313t-win32.whl", hash = "sha256:89d76815a26197c858f53c7f6a656686ec392b25991f9e409bcef020cd532312"}, + {file = "coverage-7.6.10-cp313-cp313t-win_amd64.whl", hash = "sha256:54a5f0f43950a36312155dae55c505a76cd7f2b12d26abeebbe7a0b36dbc868d"}, + {file = "coverage-7.6.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:656c82b8a0ead8bba147de9a89bda95064874c91a3ed43a00e687f23cc19d53a"}, + {file = "coverage-7.6.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ccc2b70a7ed475c68ceb548bf69cec1e27305c1c2606a5eb7c3afff56a1b3b27"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5e37dc41d57ceba70956fa2fc5b63c26dba863c946ace9705f8eca99daecdc4"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0aa9692b4fdd83a4647eeb7db46410ea1322b5ed94cd1715ef09d1d5922ba87f"}, + {file = "coverage-7.6.10-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa744da1820678b475e4ba3dfd994c321c5b13381d1041fe9c608620e6676e25"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0b1818063dc9e9d838c09e3a473c1422f517889436dd980f5d721899e66f315"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:59af35558ba08b758aec4d56182b222976330ef8d2feacbb93964f576a7e7a90"}, + {file = "coverage-7.6.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7ed2f37cfce1ce101e6dffdfd1c99e729dd2ffc291d02d3e2d0af8b53d13840d"}, + {file = "coverage-7.6.10-cp39-cp39-win32.whl", hash = "sha256:4bcc276261505d82f0ad426870c3b12cb177752834a633e737ec5ee79bbdff18"}, + {file = "coverage-7.6.10-cp39-cp39-win_amd64.whl", hash = "sha256:457574f4599d2b00f7f637a0700a6422243b3565509457b2dbd3f50703e11f59"}, + {file = "coverage-7.6.10-pp39.pp310-none-any.whl", hash = "sha256:fd34e7b3405f0cc7ab03d54a334c17a9e802897580d964bd8c2001f4b9fd488f"}, + {file = "coverage-7.6.10.tar.gz", hash = "sha256:7fb105327c8f8f0682e29843e2ff96af9dcbe5bab8eeb4b398c6a33a16d80a23"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + 
[[package]] name = "cryptography" version = "3.4.8" @@ -533,21 +605,21 @@ test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.0)" [[package]] name = "deprecated" -version = "1.2.15" +version = "1.2.18" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" groups = ["main"] files = [ - {file = "Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320"}, - {file = "deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d"}, + {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, + {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, ] [package.dependencies] wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "jinja2 (>=3.0.3,<3.1.0)", "setuptools", "sphinx (<2)", "tox"] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"] [[package]] name = "deprecation" @@ -582,6 +654,18 @@ etcd = ["python-etcd (>=0.3.3)"] gevent = ["gevent (>=1.4.0)"] zookeeper = ["kazoo (>=2.0)"] +[[package]] +name = "distlib" +version = "0.3.9" +description = "Distribution utilities" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, +] + [[package]] name = "elasticsearch" version = "7.13.4" @@ -622,24 +706,41 @@ test = ["pytest (>=6)"] [[package]] name = "fastapi" -version = "0.104.1" +version = "0.115.7" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "fastapi-0.104.1-py3-none-any.whl", hash = "sha256:752dc31160cdbd0436bb93bad51560b57e525cbb1d4bbf6f4904ceee75548241"}, - {file = "fastapi-0.104.1.tar.gz", hash = "sha256:e5e4540a7c5e1dcfbbcf5b903c234feddcdcd881f191977a1c5dfd917487e7ae"}, + {file = "fastapi-0.115.7-py3-none-any.whl", hash = "sha256:eb6a8c8bf7f26009e8147111ff15b5177a0e19bb4a45bc3486ab14804539d21e"}, + {file = "fastapi-0.115.7.tar.gz", hash = "sha256:0f106da6c01d88a6786b3248fb4d7a940d071f6f488488898ad5d354b25ed015"}, ] [package.dependencies] -anyio = ">=3.7.1,<4.0.0" pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" -starlette = ">=0.27.0,<0.28.0" +starlette = ">=0.40.0,<0.46.0" typing-extensions = ">=4.8.0" [package.extras] -all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", 
"uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.5)", "httpx (>=0.23.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] + +[[package]] +name = "filelock" +version = "3.17.0" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "filelock-3.17.0-py3-none-any.whl", hash = "sha256:533dc2f7ba78dc2f0f531fc6c4940addf7b70a481e269a5a3b93be94ffbe8338"}, + {file = "filelock-3.17.0.tar.gz", hash = "sha256:ee4e77401ef576ebb38cd7f13b9b28893194acc20a8e68e18730ba9c0e54660e"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "frozenlist" @@ -757,72 +858,106 @@ files = [ [[package]] name = "httpcore" -version = "0.13.2" +version = "1.0.7" description = "A minimal low-level HTTP client." optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpcore-0.13.2-py3-none-any.whl", hash = "sha256:52b7d9413f6f5592a667de9209d70d4d41aba3fb0540dd7c93475c78b85941e9"}, - {file = "httpcore-0.13.2.tar.gz", hash = "sha256:c16efbdf643e1b57bde0adc12c53b08645d7d92d6d345a3f71adfc2a083e7fd2"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [package.dependencies] -h11 = "==0.*" -sniffio = "==1.*" +certifi = "*" +h11 = ">=0.13,<0.15" [package.extras] +asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httptools" -version = "0.2.0" +version = "0.6.4" description = "A collection of framework independent HTTP protocol utils." 
optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "httptools-0.2.0-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:79dbc21f3612a78b28384e989b21872e2e3cf3968532601544696e4ed0007ce5"}, - {file = "httptools-0.2.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:78d03dd39b09c99ec917d50189e6743adbfd18c15d5944392d2eabda688bf149"}, - {file = "httptools-0.2.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:a23166e5ae2775709cf4f7ad4c2048755ebfb272767d244e1a96d55ac775cca7"}, - {file = "httptools-0.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3ab1f390d8867f74b3b5ee2a7ecc9b8d7f53750bd45714bf1cb72a953d7dfa77"}, - {file = "httptools-0.2.0-cp36-cp36m-win_amd64.whl", hash = "sha256:a7594f9a010cdf1e16a58b3bf26c9da39bbf663e3b8d46d39176999d71816658"}, - {file = "httptools-0.2.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:01b392a166adcc8bc2f526a939a8aabf89fe079243e1543fd0e7dc1b58d737cb"}, - {file = "httptools-0.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:80ffa04fe8c8dfacf6e4cef8277347d35b0442c581f5814f3b0cf41b65c43c6e"}, - {file = "httptools-0.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d5682eeb10cca0606c4a8286a3391d4c3c5a36f0c448e71b8bd05be4e1694bfb"}, - {file = "httptools-0.2.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a289c27ccae399a70eacf32df9a44059ca2ba4ac444604b00a19a6c1f0809943"}, - {file = "httptools-0.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:813871f961edea6cb2fe312f2d9b27d12a51ba92545380126f80d0de1917ea15"}, - {file = "httptools-0.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:cc9be041e428c10f8b6ab358c6b393648f9457094e1dcc11b4906026d43cd380"}, - {file = "httptools-0.2.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:b08d00d889a118f68f37f3c43e359aab24ee29eb2e3fe96d64c6a2ba8b9d6557"}, - {file = "httptools-0.2.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:fd3b8905e21431ad306eeaf56644a68fdd621bf8f3097eff54d0f6bdf7262065"}, - {file = "httptools-0.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:200fc1cdf733a9ff554c0bb97a4047785cfaad9875307d6087001db3eb2b417f"}, - {file = "httptools-0.2.0.tar.gz", hash = "sha256:94505026be56652d7a530ab03d89474dc6021019d6b8682281977163b3471ea0"}, +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "httptools-0.6.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3c73ce323711a6ffb0d247dcd5a550b8babf0f757e86a52558fe5b86d6fefcc0"}, + {file = "httptools-0.6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:345c288418f0944a6fe67be8e6afa9262b18c7626c3ef3c28adc5eabc06a68da"}, + {file = "httptools-0.6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:deee0e3343f98ee8047e9f4c5bc7cedbf69f5734454a94c38ee829fb2d5fa3c1"}, + {file = "httptools-0.6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca80b7485c76f768a3bc83ea58373f8db7b015551117375e4918e2aa77ea9b50"}, + {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:90d96a385fa941283ebd231464045187a31ad932ebfa541be8edf5b3c2328959"}, + {file = "httptools-0.6.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:59e724f8b332319e2875efd360e61ac07f33b492889284a3e05e6d13746876f4"}, + {file = "httptools-0.6.4-cp310-cp310-win_amd64.whl", hash = "sha256:c26f313951f6e26147833fc923f78f95604bbec812a43e5ee37f26dc9e5a686c"}, + {file = "httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069"}, + {file = 
"httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988"}, + {file = "httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f"}, + {file = "httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0"}, + {file = "httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8"}, + {file = "httptools-0.6.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:d3f0d369e7ffbe59c4b6116a44d6a8eb4783aae027f2c0b366cf0aa964185dba"}, + {file = "httptools-0.6.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:94978a49b8f4569ad607cd4946b759d90b285e39c0d4640c6b36ca7a3ddf2efc"}, + {file = 
"httptools-0.6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40dc6a8e399e15ea525305a2ddba998b0af5caa2566bcd79dcbe8948181eeaff"}, + {file = "httptools-0.6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab9ba8dcf59de5181f6be44a77458e45a578fc99c31510b8c65b7d5acc3cf490"}, + {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fc411e1c0a7dcd2f902c7c48cf079947a7e65b5485dea9decb82b9105ca71a43"}, + {file = "httptools-0.6.4-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:d54efd20338ac52ba31e7da78e4a72570cf729fac82bc31ff9199bedf1dc7440"}, + {file = "httptools-0.6.4-cp38-cp38-win_amd64.whl", hash = "sha256:df959752a0c2748a65ab5387d08287abf6779ae9165916fe053e68ae1fbdc47f"}, + {file = "httptools-0.6.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:85797e37e8eeaa5439d33e556662cc370e474445d5fab24dcadc65a8ffb04003"}, + {file = "httptools-0.6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:db353d22843cf1028f43c3651581e4bb49374d85692a85f95f7b9a130e1b2cab"}, + {file = "httptools-0.6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ffd262a73d7c28424252381a5b854c19d9de5f56f075445d33919a637e3547"}, + {file = "httptools-0.6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c346571fa50d2e9856a37d7cd9435a25e7fd15e236c397bf224afaa355fe9"}, + {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:aafe0f1918ed07b67c1e838f950b1c1fabc683030477e60b335649b8020e1076"}, + {file = "httptools-0.6.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0e563e54979e97b6d13f1bbc05a96109923e76b901f786a5eae36e99c01237bd"}, + {file = "httptools-0.6.4-cp39-cp39-win_amd64.whl", hash = "sha256:b799de31416ecc589ad79dd85a0b2657a8fe39327944998dea368c1d4c9e55e6"}, + {file = "httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c"}, ] [package.extras] -test = ["Cython (==0.29.22)"] +test = ["Cython (>=0.29.24)"] [[package]] name = "httpx" -version = "0.18.1" +version = "0.28.1" description = "The next generation HTTP client." 
optional = false -python-versions = ">=3.6" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "httpx-0.18.1-py3-none-any.whl", hash = "sha256:ad2e3db847be736edc4b272c4d5788790a7e5789ef132fc6b5fef8aeb9e9f6e0"}, - {file = "httpx-0.18.1.tar.gz", hash = "sha256:0a2651dd2b9d7662c70d12ada5c290abcf57373b9633515fe4baa9f62566086f"}, + {file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"}, + {file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"}, ] [package.dependencies] +anyio = "*" certifi = "*" -httpcore = ">=0.13.0,<0.14.0" -rfc3986 = {version = ">=1.3,<2", extras = ["idna2008"]} -sniffio = "*" +httpcore = "==1.*" +idna = "*" [package.extras] -brotli = ["brotlicffi (==1.*)"] -http2 = ["h2 (==3.*)"] +brotli = ["brotli", "brotlicffi"] +cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (==1.*)"] +zstd = ["zstandard (>=0.18.0)"] [[package]] name = "idna" @@ -839,6 +974,18 @@ files = [ [package.extras] all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + [[package]] name = "jmespath" version = "1.0.1" @@ -1037,87 +1184,91 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "orjson" -version = "3.10.14" +version = "3.10.15" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "orjson-3.10.14-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:849ea7845a55f09965826e816cdc7689d6cf74fe9223d79d758c714af955bcb6"}, - {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5947b139dfa33f72eecc63f17e45230a97e741942955a6c9e650069305eb73d"}, - {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cde6d76910d3179dae70f164466692f4ea36da124d6fb1a61399ca589e81d69a"}, - {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6dfbaeb7afa77ca608a50e2770a0461177b63a99520d4928e27591b142c74b1"}, - {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa45e489ef80f28ff0e5ba0a72812b8cfc7c1ef8b46a694723807d1b07c89ebb"}, - {file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f5007abfdbb1d866e2aa8990bd1c465f0f6da71d19e695fc278282be12cffa5"}, - {file = "orjson-3.10.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1b49e2af011c84c3f2d541bb5cd1e3c7c2df672223e7e3ea608f09cf295e5f8a"}, - {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:164ac155109226b3a2606ee6dda899ccfbe6e7e18b5bdc3fbc00f79cc074157d"}, - {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6b1225024cf0ef5d15934b5ffe9baf860fe8bc68a796513f5ea4f5056de30bca"}, - {file = 
"orjson-3.10.14-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d6546e8073dc382e60fcae4a001a5a1bc46da5eab4a4878acc2d12072d6166d5"}, - {file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9f1d2942605c894162252d6259b0121bf1cb493071a1ea8cb35d79cb3e6ac5bc"}, - {file = "orjson-3.10.14-cp310-cp310-win32.whl", hash = "sha256:397083806abd51cf2b3bbbf6c347575374d160331a2d33c5823e22249ad3118b"}, - {file = "orjson-3.10.14-cp310-cp310-win_amd64.whl", hash = "sha256:fa18f949d3183a8d468367056be989666ac2bef3a72eece0bade9cdb733b3c28"}, - {file = "orjson-3.10.14-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f506fd666dd1ecd15a832bebc66c4df45c1902fd47526292836c339f7ba665a9"}, - {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efe5fd254cfb0eeee13b8ef7ecb20f5d5a56ddda8a587f3852ab2cedfefdb5f6"}, - {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ddc8c866d7467f5ee2991397d2ea94bcf60d0048bdd8ca555740b56f9042725"}, - {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af8e42ae4363773658b8d578d56dedffb4f05ceeb4d1d4dd3fb504950b45526"}, - {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84dd83110503bc10e94322bf3ffab8bc49150176b49b4984dc1cce4c0a993bf9"}, - {file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36f5bfc0399cd4811bf10ec7a759c7ab0cd18080956af8ee138097d5b5296a95"}, - {file = "orjson-3.10.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868943660fb2a1e6b6b965b74430c16a79320b665b28dd4511d15ad5038d37d5"}, - {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33449c67195969b1a677533dee9d76e006001213a24501333624623e13c7cc8e"}, - {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e4c9f60f9fb0b5be66e416dcd8c9d94c3eabff3801d875bdb1f8ffc12cf86905"}, - {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0de4d6315cfdbd9ec803b945c23b3a68207fd47cbe43626036d97e8e9561a436"}, - {file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:83adda3db595cb1a7e2237029b3249c85afbe5c747d26b41b802e7482cb3933e"}, - {file = "orjson-3.10.14-cp311-cp311-win32.whl", hash = "sha256:998019ef74a4997a9d741b1473533cdb8faa31373afc9849b35129b4b8ec048d"}, - {file = "orjson-3.10.14-cp311-cp311-win_amd64.whl", hash = "sha256:9d034abdd36f0f0f2240f91492684e5043d46f290525d1117712d5b8137784eb"}, - {file = "orjson-3.10.14-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2ad4b7e367efba6dc3f119c9a0fcd41908b7ec0399a696f3cdea7ec477441b09"}, - {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f496286fc85e93ce0f71cc84fc1c42de2decf1bf494094e188e27a53694777a7"}, - {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c7f189bbfcded40e41a6969c1068ba305850ba016665be71a217918931416fbf"}, - {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cc8204f0b75606869c707da331058ddf085de29558b516fc43c73ee5ee2aadb"}, - {file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deaa2899dff7f03ab667e2ec25842d233e2a6a9e333efa484dfe666403f3501c"}, - {file = 
"orjson-3.10.14-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1c3ea52642c9714dc6e56de8a451a066f6d2707d273e07fe8a9cc1ba073813d"}, - {file = "orjson-3.10.14-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9d3f9ed72e7458ded9a1fb1b4d4ed4c4fdbaf82030ce3f9274b4dc1bff7ace2b"}, - {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:07520685d408a2aba514c17ccc16199ff2934f9f9e28501e676c557f454a37fe"}, - {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:76344269b550ea01488d19a2a369ab572c1ac4449a72e9f6ac0d70eb1cbfb953"}, - {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e2979d0f2959990620f7e62da6cd954e4620ee815539bc57a8ae46e2dacf90e3"}, - {file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03f61ca3674555adcb1aa717b9fc87ae936aa7a63f6aba90a474a88701278780"}, - {file = "orjson-3.10.14-cp312-cp312-win32.whl", hash = "sha256:d5075c54edf1d6ad81d4c6523ce54a748ba1208b542e54b97d8a882ecd810fd1"}, - {file = "orjson-3.10.14-cp312-cp312-win_amd64.whl", hash = "sha256:175cafd322e458603e8ce73510a068d16b6e6f389c13f69bf16de0e843d7d406"}, - {file = "orjson-3.10.14-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:0905ca08a10f7e0e0c97d11359609300eb1437490a7f32bbaa349de757e2e0c7"}, - {file = "orjson-3.10.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92d13292249f9f2a3e418cbc307a9fbbef043c65f4bd8ba1eb620bc2aaba3d15"}, - {file = "orjson-3.10.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90937664e776ad316d64251e2fa2ad69265e4443067668e4727074fe39676414"}, - {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9ed3d26c4cb4f6babaf791aa46a029265850e80ec2a566581f5c2ee1a14df4f1"}, - {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:56ee546c2bbe9599aba78169f99d1dc33301853e897dbaf642d654248280dc6e"}, - {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:901e826cb2f1bdc1fcef3ef59adf0c451e8f7c0b5deb26c1a933fb66fb505eae"}, - {file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:26336c0d4b2d44636e1e1e6ed1002f03c6aae4a8a9329561c8883f135e9ff010"}, - {file = "orjson-3.10.14-cp313-cp313-win32.whl", hash = "sha256:e2bc525e335a8545c4e48f84dd0328bc46158c9aaeb8a1c2276546e94540ea3d"}, - {file = "orjson-3.10.14-cp313-cp313-win_amd64.whl", hash = "sha256:eca04dfd792cedad53dc9a917da1a522486255360cb4e77619343a20d9f35364"}, - {file = "orjson-3.10.14-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9a0fba3b8a587a54c18585f077dcab6dd251c170d85cfa4d063d5746cd595a0f"}, - {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:175abf3d20e737fec47261d278f95031736a49d7832a09ab684026528c4d96db"}, - {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:29ca1a93e035d570e8b791b6c0feddd403c6a5388bfe870bf2aa6bba1b9d9b8e"}, - {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f77202c80e8ab5a1d1e9faf642343bee5aaf332061e1ada4e9147dbd9eb00c46"}, - {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e2ec73b7099b6a29b40a62e08a23b936423bd35529f8f55c42e27acccde7954"}, - {file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:a2d1679df9f9cd9504f8dff24555c1eaabba8aad7f5914f28dab99e3c2552c9d"}, - {file = "orjson-3.10.14-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691ab9a13834310a263664313e4f747ceb93662d14a8bdf20eb97d27ed488f16"}, - {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b11ed82054fce82fb74cea33247d825d05ad6a4015ecfc02af5fbce442fbf361"}, - {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:e70a1d62b8288677d48f3bea66c21586a5f999c64ecd3878edb7393e8d1b548d"}, - {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:16642f10c1ca5611251bd835de9914a4b03095e28a34c8ba6a5500b5074338bd"}, - {file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3871bad546aa66c155e3f36f99c459780c2a392d502a64e23fb96d9abf338511"}, - {file = "orjson-3.10.14-cp38-cp38-win32.whl", hash = "sha256:0293a88815e9bb5c90af4045f81ed364d982f955d12052d989d844d6c4e50945"}, - {file = "orjson-3.10.14-cp38-cp38-win_amd64.whl", hash = "sha256:6169d3868b190d6b21adc8e61f64e3db30f50559dfbdef34a1cd6c738d409dfc"}, - {file = "orjson-3.10.14-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:06d4ec218b1ec1467d8d64da4e123b4794c781b536203c309ca0f52819a16c03"}, - {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962c2ec0dcaf22b76dee9831fdf0c4a33d4bf9a257a2bc5d4adc00d5c8ad9034"}, - {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:21d3be4132f71ef1360385770474f29ea1538a242eef72ac4934fe142800e37f"}, - {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28ed60597c149a9e3f5ad6dd9cebaee6fb2f0e3f2d159a4a2b9b862d4748860"}, - {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e947f70167fe18469f2023644e91ab3d24f9aed69a5e1c78e2c81b9cea553fb"}, - {file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64410696c97a35af2432dea7bdc4ce32416458159430ef1b4beb79fd30093ad6"}, - {file = "orjson-3.10.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8050a5d81c022561ee29cd2739de5b4445f3c72f39423fde80a63299c1892c52"}, - {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b49a28e30d3eca86db3fe6f9b7f4152fcacbb4a467953cd1b42b94b479b77956"}, - {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ca041ad20291a65d853a9523744eebc3f5a4b2f7634e99f8fe88320695ddf766"}, - {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d313a2998b74bb26e9e371851a173a9b9474764916f1fc7971095699b3c6e964"}, - {file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7796692136a67b3e301ef9052bde6fe8e7bd5200da766811a3a608ffa62aaff0"}, - {file = "orjson-3.10.14-cp39-cp39-win32.whl", hash = "sha256:eee4bc767f348fba485ed9dc576ca58b0a9eac237f0e160f7a59bce628ed06b3"}, - {file = "orjson-3.10.14-cp39-cp39-win_amd64.whl", hash = "sha256:96a1c0ee30fb113b3ae3c748fd75ca74a157ff4c58476c47db4d61518962a011"}, - {file = "orjson-3.10.14.tar.gz", hash = "sha256:cf31f6f071a6b8e7aa1ead1fa27b935b48d00fbfa6a28ce856cfff2d5dd68eed"}, + {file = "orjson-3.10.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:552c883d03ad185f720d0c09583ebde257e41b9521b74ff40e08b7dec4559c04"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:616e3e8d438d02e4854f70bfdc03a6bcdb697358dbaa6bcd19cbe24d24ece1f8"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c2c79fa308e6edb0ffab0a31fd75a7841bf2a79a20ef08a3c6e3b26814c8ca8"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cb85490aa6bf98abd20607ab5c8324c0acb48d6da7863a51be48505646c814"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763dadac05e4e9d2bc14938a45a2d0560549561287d41c465d3c58aec818b164"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a330b9b4734f09a623f74a7490db713695e13b67c959713b78369f26b3dee6bf"}, + {file = "orjson-3.10.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a61a4622b7ff861f019974f73d8165be1bd9a0855e1cad18ee167acacabeb061"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd271247691574416b3228db667b84775c497b245fa275c6ab90dc1ffbbd2b3"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:e4759b109c37f635aa5c5cc93a1b26927bfde24b254bcc0e1149a9fada253d2d"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e992fd5cfb8b9f00bfad2fd7a05a4299db2bbe92e6440d9dd2fab27655b3182"}, + {file = "orjson-3.10.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f95fb363d79366af56c3f26b71df40b9a583b07bbaaf5b317407c4d58497852e"}, + {file = "orjson-3.10.15-cp310-cp310-win32.whl", hash = "sha256:f9875f5fea7492da8ec2444839dcc439b0ef298978f311103d0b7dfd775898ab"}, + {file = "orjson-3.10.15-cp310-cp310-win_amd64.whl", hash = "sha256:17085a6aa91e1cd70ca8533989a18b5433e15d29c574582f76f821737c8d5806"}, + {file = "orjson-3.10.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4cc83960ab79a4031f3119cc4b1a1c627a3dc09df125b27c4201dff2af7eaa6"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddbeef2481d895ab8be5185f2432c334d6dec1f5d1933a9c83014d188e102cef"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e590a0477b23ecd5b0ac865b1b907b01b3c5535f5e8a8f6ab0e503efb896334"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a6be38bd103d2fd9bdfa31c2720b23b5d47c6796bcb1d1b598e3924441b4298d"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ff4f6edb1578960ed628a3b998fa54d78d9bb3e2eb2cfc5c2a09732431c678d0"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0482b21d0462eddd67e7fce10b89e0b6ac56570424662b685a0d6fccf581e13"}, + {file = "orjson-3.10.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bb5cc3527036ae3d98b65e37b7986a918955f85332c1ee07f9d3f82f3a6899b5"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d569c1c462912acdd119ccbf719cf7102ea2c67dd03b99edcb1a3048651ac96b"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:1e6d33efab6b71d67f22bf2962895d3dc6f82a6273a965fab762e64fa90dc399"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c33be3795e299f565681d69852ac8c1bc5c84863c0b0030b2b3468843be90388"}, + {file = "orjson-3.10.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:eea80037b9fae5339b214f59308ef0589fc06dc870578b7cce6d71eb2096764c"}, + {file = "orjson-3.10.15-cp311-cp311-win32.whl", hash = "sha256:d5ac11b659fd798228a7adba3e37c010e0152b78b1982897020a8e019a94882e"}, + {file = "orjson-3.10.15-cp311-cp311-win_amd64.whl", hash = "sha256:cf45e0214c593660339ef63e875f32ddd5aa3b4adc15e662cdb80dc49e194f8e"}, + {file = "orjson-3.10.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9d11c0714fc85bfcf36ada1179400862da3288fc785c30e8297844c867d7505a"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dba5a1e85d554e3897fa9fe6fbcff2ed32d55008973ec9a2b992bd9a65d2352d"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7723ad949a0ea502df656948ddd8b392780a5beaa4c3b5f97e525191b102fff0"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6fd9bc64421e9fe9bd88039e7ce8e58d4fead67ca88e3a4014b143cec7684fd4"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dadba0e7b6594216c214ef7894c4bd5f08d7c0135f4dd0145600be4fbcc16767"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b48f59114fe318f33bbaee8ebeda696d8ccc94c9e90bc27dbe72153094e26f41"}, + {file = "orjson-3.10.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:035fb83585e0f15e076759b6fedaf0abb460d1765b6a36f48018a52858443514"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d13b7fe322d75bf84464b075eafd8e7dd9eae05649aa2a5354cfa32f43c59f17"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7066b74f9f259849629e0d04db6609db4cf5b973248f455ba5d3bd58a4daaa5b"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88dc3f65a026bd3175eb157fea994fca6ac7c4c8579fc5a86fc2114ad05705b7"}, + {file = "orjson-3.10.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b342567e5465bd99faa559507fe45e33fc76b9fb868a63f1642c6bc0735ad02a"}, + {file = "orjson-3.10.15-cp312-cp312-win32.whl", hash = "sha256:0a4f27ea5617828e6b58922fdbec67b0aa4bb844e2d363b9244c47fa2180e665"}, + {file = "orjson-3.10.15-cp312-cp312-win_amd64.whl", hash = "sha256:ef5b87e7aa9545ddadd2309efe6824bd3dd64ac101c15dae0f2f597911d46eaa"}, + {file = "orjson-3.10.15-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:bae0e6ec2b7ba6895198cd981b7cca95d1487d0147c8ed751e5632ad16f031a6"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f93ce145b2db1252dd86af37d4165b6faa83072b46e3995ecc95d4b2301b725a"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c203f6f969210128af3acae0ef9ea6aab9782939f45f6fe02d05958fe761ef9"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8918719572d662e18b8af66aef699d8c21072e54b6c82a3f8f6404c1f5ccd5e0"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f71eae9651465dff70aa80db92586ad5b92df46a9373ee55252109bb6b703307"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e117eb299a35f2634e25ed120c37c641398826c2f5a3d3cc39f5993b96171b9e"}, + {file = "orjson-3.10.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:13242f12d295e83c2955756a574ddd6741c81e5b99f2bef8ed8d53e47a01e4b7"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7946922ada8f3e0b7b958cc3eb22cfcf6c0df83d1fe5521b4a100103e3fa84c8"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:b7155eb1623347f0f22c38c9abdd738b287e39b9982e1da227503387b81b34ca"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:208beedfa807c922da4e81061dafa9c8489c6328934ca2a562efa707e049e561"}, + {file = "orjson-3.10.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eca81f83b1b8c07449e1d6ff7074e82e3fd6777e588f1a6632127f286a968825"}, + {file = "orjson-3.10.15-cp313-cp313-win32.whl", hash = "sha256:c03cd6eea1bd3b949d0d007c8d57049aa2b39bd49f58b4b2af571a5d3833d890"}, + {file = "orjson-3.10.15-cp313-cp313-win_amd64.whl", hash = "sha256:fd56a26a04f6ba5fb2045b0acc487a63162a958ed837648c5781e1fe3316cfbf"}, + {file = "orjson-3.10.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5e8afd6200e12771467a1a44e5ad780614b86abb4b11862ec54861a82d677746"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da9a18c500f19273e9e104cca8c1f0b40a6470bcccfc33afcc088045d0bf5ea6"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bb00b7bfbdf5d34a13180e4805d76b4567025da19a197645ca746fc2fb536586"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:33aedc3d903378e257047fee506f11e0833146ca3e57a1a1fb0ddb789876c1e1"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd0099ae6aed5eb1fc84c9eb72b95505a3df4267e6962eb93cdd5af03be71c98"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c864a80a2d467d7786274fce0e4f93ef2a7ca4ff31f7fc5634225aaa4e9e98c"}, + {file = "orjson-3.10.15-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c25774c9e88a3e0013d7d1a6c8056926b607a61edd423b50eb5c88fd7f2823ae"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e78c211d0074e783d824ce7bb85bf459f93a233eb67a5b5003498232ddfb0e8a"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:43e17289ffdbbac8f39243916c893d2ae41a2ea1a9cbb060a56a4d75286351ae"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:781d54657063f361e89714293c095f506c533582ee40a426cb6489c48a637b81"}, + {file = "orjson-3.10.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6875210307d36c94873f553786a808af2788e362bd0cf4c8e66d976791e7b528"}, + {file = "orjson-3.10.15-cp38-cp38-win32.whl", hash = "sha256:305b38b2b8f8083cc3d618927d7f424349afce5975b316d33075ef0f73576b60"}, + {file = "orjson-3.10.15-cp38-cp38-win_amd64.whl", hash = "sha256:5dd9ef1639878cc3efffed349543cbf9372bdbd79f478615a1c633fe4e4180d1"}, + {file = "orjson-3.10.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ffe19f3e8d68111e8644d4f4e267a069ca427926855582ff01fc012496d19969"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d433bf32a363823863a96561a555227c18a522a8217a6f9400f00ddc70139ae2"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:da03392674f59a95d03fa5fb9fe3a160b0511ad84b7a3914699ea5a1b3a38da2"}, + {file = 
"orjson-3.10.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3a63bb41559b05360ded9132032239e47983a39b151af1201f07ec9370715c82"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3766ac4702f8f795ff3fa067968e806b4344af257011858cc3d6d8721588b53f"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a1c73dcc8fadbd7c55802d9aa093b36878d34a3b3222c41052ce6b0fc65f8e8"}, + {file = "orjson-3.10.15-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b299383825eafe642cbab34be762ccff9fd3408d72726a6b2a4506d410a71ab3"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:abc7abecdbf67a173ef1316036ebbf54ce400ef2300b4e26a7b843bd446c2480"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:3614ea508d522a621384c1d6639016a5a2e4f027f3e4a1c93a51867615d28829"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:295c70f9dc154307777ba30fe29ff15c1bcc9dfc5c48632f37d20a607e9ba85a"}, + {file = "orjson-3.10.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:63309e3ff924c62404923c80b9e2048c1f74ba4b615e7584584389ada50ed428"}, + {file = "orjson-3.10.15-cp39-cp39-win32.whl", hash = "sha256:a2f708c62d026fb5340788ba94a55c23df4e1869fec74be455e0b2f5363b8507"}, + {file = "orjson-3.10.15-cp39-cp39-win_amd64.whl", hash = "sha256:efcf6c735c3d22ef60c4aa27a5238f1a477df85e9b15f2142f9d669beb2d13fd"}, + {file = "orjson-3.10.15.tar.gz", hash = "sha256:05ca7fe452a2e9d8d9d706a2984c95b9c2ebc5db417ce0b7a49b91d50642a23e"}, ] [[package]] @@ -1181,6 +1332,39 @@ pytz = ">=2017.3" [package.extras] test = ["hypothesis (>=3.58)", "pytest (>=5.0.1)", "pytest-xdist"] +[[package]] +name = "platformdirs" +version = "4.3.6" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] + +[[package]] +name = "pluggy" +version = "1.5.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, + {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + [[package]] name = "propcache" version = "0.2.1" @@ -1287,143 +1471,219 @@ files = [ [[package]] name = "pydantic" -version = "2.3.0" +version = "2.10.5" description = "Data validation using Python type hints" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic-2.3.0-py3-none-any.whl", hash = "sha256:45b5e446c6dfaad9444819a293b921a40e1db1aa61ea08aede0522529ce90e81"}, - {file = "pydantic-2.3.0.tar.gz", hash = "sha256:1607cc106602284cd4a00882986570472f193fde9cb1259bceeaedb26aa79a6d"}, + {file = "pydantic-2.10.5-py3-none-any.whl", hash = "sha256:4dd4e322dbe55472cb7ca7e73f4b63574eecccf2835ffa2af9021ce113c83c53"}, + {file = "pydantic-2.10.5.tar.gz", hash = "sha256:278b38dbbaec562011d659ee05f63346951b3a248a6f3642e1bc68894ea2b4ff"}, ] [package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.6.3" -typing-extensions = ">=4.6.1" +annotated-types = ">=0.6.0" +pydantic-core = "2.27.2" +typing-extensions = ">=4.12.2" [package.extras] email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata"] [[package]] name = "pydantic-core" -version = "2.6.3" -description = "" +version = "2.27.2" +description = "Core functionality for Pydantic validation and serialization" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:1a0ddaa723c48af27d19f27f1c73bdc615c73686d763388c8683fe34ae777bad"}, - {file = "pydantic_core-2.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5cfde4fab34dd1e3a3f7f3db38182ab6c95e4ea91cf322242ee0be5c2f7e3d2f"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5493a7027bfc6b108e17c3383959485087d5942e87eb62bbac69829eae9bc1f7"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:84e87c16f582f5c753b7f39a71bd6647255512191be2d2dbf49458c4ef024588"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:522a9c4a4d1924facce7270c84b5134c5cabcb01513213662a2e89cf28c1d309"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaafc776e5edc72b3cad1ccedb5fd869cc5c9a591f1213aa9eba31a781be9ac1"}, - {file = 
"pydantic_core-2.6.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a750a83b2728299ca12e003d73d1264ad0440f60f4fc9cee54acc489249b728"}, - {file = "pydantic_core-2.6.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9e8b374ef41ad5c461efb7a140ce4730661aadf85958b5c6a3e9cf4e040ff4bb"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b594b64e8568cf09ee5c9501ede37066b9fc41d83d58f55b9952e32141256acd"}, - {file = "pydantic_core-2.6.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2a20c533cb80466c1d42a43a4521669ccad7cf2967830ac62c2c2f9cece63e7e"}, - {file = "pydantic_core-2.6.3-cp310-none-win32.whl", hash = "sha256:04fe5c0a43dec39aedba0ec9579001061d4653a9b53a1366b113aca4a3c05ca7"}, - {file = "pydantic_core-2.6.3-cp310-none-win_amd64.whl", hash = "sha256:6bf7d610ac8f0065a286002a23bcce241ea8248c71988bda538edcc90e0c39ad"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:6bcc1ad776fffe25ea5c187a028991c031a00ff92d012ca1cc4714087e575973"}, - {file = "pydantic_core-2.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:df14f6332834444b4a37685810216cc8fe1fe91f447332cd56294c984ecbff1c"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0b7486d85293f7f0bbc39b34e1d8aa26210b450bbd3d245ec3d732864009819"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a892b5b1871b301ce20d40b037ffbe33d1407a39639c2b05356acfef5536d26a"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:883daa467865e5766931e07eb20f3e8152324f0adf52658f4d302242c12e2c32"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d4eb77df2964b64ba190eee00b2312a1fd7a862af8918ec70fc2d6308f76ac64"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce8c84051fa292a5dc54018a40e2a1926fd17980a9422c973e3ebea017aa8da"}, - {file = "pydantic_core-2.6.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:22134a4453bd59b7d1e895c455fe277af9d9d9fbbcb9dc3f4a97b8693e7e2c9b"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:02e1c385095efbd997311d85c6021d32369675c09bcbfff3b69d84e59dc103f6"}, - {file = "pydantic_core-2.6.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d79f1f2f7ebdb9b741296b69049ff44aedd95976bfee38eb4848820628a99b50"}, - {file = "pydantic_core-2.6.3-cp311-none-win32.whl", hash = "sha256:430ddd965ffd068dd70ef4e4d74f2c489c3a313adc28e829dd7262cc0d2dd1e8"}, - {file = "pydantic_core-2.6.3-cp311-none-win_amd64.whl", hash = "sha256:84f8bb34fe76c68c9d96b77c60cef093f5e660ef8e43a6cbfcd991017d375950"}, - {file = "pydantic_core-2.6.3-cp311-none-win_arm64.whl", hash = "sha256:5a2a3c9ef904dcdadb550eedf3291ec3f229431b0084666e2c2aa8ff99a103a2"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:8421cf496e746cf8d6b677502ed9a0d1e4e956586cd8b221e1312e0841c002d5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bb128c30cf1df0ab78166ded1ecf876620fb9aac84d2413e8ea1594b588c735d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37a822f630712817b6ecc09ccc378192ef5ff12e2c9bae97eb5968a6cdf3b862"}, - {file = 
"pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:240a015102a0c0cc8114f1cba6444499a8a4d0333e178bc504a5c2196defd456"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f90e5e3afb11268628c89f378f7a1ea3f2fe502a28af4192e30a6cdea1e7d5e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:340e96c08de1069f3d022a85c2a8c63529fd88709468373b418f4cf2c949fb0e"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1480fa4682e8202b560dcdc9eeec1005f62a15742b813c88cdc01d44e85308e5"}, - {file = "pydantic_core-2.6.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f14546403c2a1d11a130b537dda28f07eb6c1805a43dae4617448074fd49c282"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a87c54e72aa2ef30189dc74427421e074ab4561cf2bf314589f6af5b37f45e6d"}, - {file = "pydantic_core-2.6.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f93255b3e4d64785554e544c1c76cd32f4a354fa79e2eeca5d16ac2e7fdd57aa"}, - {file = "pydantic_core-2.6.3-cp312-none-win32.whl", hash = "sha256:f70dc00a91311a1aea124e5f64569ea44c011b58433981313202c46bccbec0e1"}, - {file = "pydantic_core-2.6.3-cp312-none-win_amd64.whl", hash = "sha256:23470a23614c701b37252618e7851e595060a96a23016f9a084f3f92f5ed5881"}, - {file = "pydantic_core-2.6.3-cp312-none-win_arm64.whl", hash = "sha256:1ac1750df1b4339b543531ce793b8fd5c16660a95d13aecaab26b44ce11775e9"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:a53e3195f134bde03620d87a7e2b2f2046e0e5a8195e66d0f244d6d5b2f6d31b"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f2969e8f72c6236c51f91fbb79c33821d12a811e2a94b7aa59c65f8dbdfad34a"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:672174480a85386dd2e681cadd7d951471ad0bb028ed744c895f11f9d51b9ebe"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:002d0ea50e17ed982c2d65b480bd975fc41086a5a2f9c924ef8fc54419d1dea3"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ccc13afee44b9006a73d2046068d4df96dc5b333bf3509d9a06d1b42db6d8bf"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:439a0de139556745ae53f9cc9668c6c2053444af940d3ef3ecad95b079bc9987"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d63b7545d489422d417a0cae6f9898618669608750fc5e62156957e609e728a5"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b44c42edc07a50a081672e25dfe6022554b47f91e793066a7b601ca290f71e42"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1c721bfc575d57305dd922e6a40a8fe3f762905851d694245807a351ad255c58"}, - {file = "pydantic_core-2.6.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e4a2cf8c4543f37f5dc881de6c190de08096c53986381daebb56a355be5dfe6"}, - {file = "pydantic_core-2.6.3-cp37-none-win32.whl", hash = "sha256:d9b4916b21931b08096efed090327f8fe78e09ae8f5ad44e07f5c72a7eedb51b"}, - {file = "pydantic_core-2.6.3-cp37-none-win_amd64.whl", hash = "sha256:a8acc9dedd304da161eb071cc7ff1326aa5b66aadec9622b2574ad3ffe225525"}, - {file = 
"pydantic_core-2.6.3-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:5e9c068f36b9f396399d43bfb6defd4cc99c36215f6ff33ac8b9c14ba15bdf6b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e61eae9b31799c32c5f9b7be906be3380e699e74b2db26c227c50a5fc7988698"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d85463560c67fc65cd86153a4975d0b720b6d7725cf7ee0b2d291288433fc21b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9616567800bdc83ce136e5847d41008a1d602213d024207b0ff6cab6753fe645"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e9b65a55bbabda7fccd3500192a79f6e474d8d36e78d1685496aad5f9dbd92c"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f468d520f47807d1eb5d27648393519655eadc578d5dd862d06873cce04c4d1b"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9680dd23055dd874173a3a63a44e7f5a13885a4cfd7e84814be71be24fba83db"}, - {file = "pydantic_core-2.6.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a718d56c4d55efcfc63f680f207c9f19c8376e5a8a67773535e6f7e80e93170"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8ecbac050856eb6c3046dea655b39216597e373aa8e50e134c0e202f9c47efec"}, - {file = "pydantic_core-2.6.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:788be9844a6e5c4612b74512a76b2153f1877cd845410d756841f6c3420230eb"}, - {file = "pydantic_core-2.6.3-cp38-none-win32.whl", hash = "sha256:07a1aec07333bf5adebd8264047d3dc518563d92aca6f2f5b36f505132399efc"}, - {file = "pydantic_core-2.6.3-cp38-none-win_amd64.whl", hash = "sha256:621afe25cc2b3c4ba05fff53525156d5100eb35c6e5a7cf31d66cc9e1963e378"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:813aab5bfb19c98ae370952b6f7190f1e28e565909bfc219a0909db168783465"}, - {file = "pydantic_core-2.6.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:50555ba3cb58f9861b7a48c493636b996a617db1a72c18da4d7f16d7b1b9952b"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e20f8baedd7d987bd3f8005c146e6bcbda7cdeefc36fad50c66adb2dd2da48"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b0a5d7edb76c1c57b95df719af703e796fc8e796447a1da939f97bfa8a918d60"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f06e21ad0b504658a3a9edd3d8530e8cea5723f6ea5d280e8db8efc625b47e49"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea053cefa008fda40f92aab937fb9f183cf8752e41dbc7bc68917884454c6362"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:171a4718860790f66d6c2eda1d95dd1edf64f864d2e9f9115840840cf5b5713f"}, - {file = "pydantic_core-2.6.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ed7ceca6aba5331ece96c0e328cd52f0dcf942b8895a1ed2642de50800b79d3"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:acafc4368b289a9f291e204d2c4c75908557d4f36bd3ae937914d4529bf62a76"}, - {file = "pydantic_core-2.6.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1aa712ba150d5105814e53cb141412217146fedc22621e9acff9236d77d2a5ef"}, - {file = 
"pydantic_core-2.6.3-cp39-none-win32.whl", hash = "sha256:44b4f937b992394a2e81a5c5ce716f3dcc1237281e81b80c748b2da6dd5cf29a"}, - {file = "pydantic_core-2.6.3-cp39-none-win_amd64.whl", hash = "sha256:9b33bf9658cb29ac1a517c11e865112316d09687d767d7a0e4a63d5c640d1b17"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d7050899026e708fb185e174c63ebc2c4ee7a0c17b0a96ebc50e1f76a231c057"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:99faba727727b2e59129c59542284efebbddade4f0ae6a29c8b8d3e1f437beb7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fa159b902d22b283b680ef52b532b29554ea2a7fc39bf354064751369e9dbd7"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:046af9cfb5384f3684eeb3f58a48698ddab8dd870b4b3f67f825353a14441418"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:930bfe73e665ebce3f0da2c6d64455098aaa67e1a00323c74dc752627879fc67"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85cc4d105747d2aa3c5cf3e37dac50141bff779545ba59a095f4a96b0a460e70"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:b25afe9d5c4f60dcbbe2b277a79be114e2e65a16598db8abee2a2dcde24f162b"}, - {file = "pydantic_core-2.6.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e49ce7dc9f925e1fb010fc3d555250139df61fa6e5a0a95ce356329602c11ea9"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2dd50d6a1aef0426a1d0199190c6c43ec89812b1f409e7fe44cb0fbf6dfa733c"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6595b0d8c8711e8e1dc389d52648b923b809f68ac1c6f0baa525c6440aa0daa"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ef724a059396751aef71e847178d66ad7fc3fc969a1a40c29f5aac1aa5f8784"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3c8945a105f1589ce8a693753b908815e0748f6279959a4530f6742e1994dcb6"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:c8c6660089a25d45333cb9db56bb9e347241a6d7509838dbbd1931d0e19dbc7f"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:692b4ff5c4e828a38716cfa92667661a39886e71136c97b7dac26edef18767f7"}, - {file = "pydantic_core-2.6.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1a5d8f18877474c80b7711d870db0eeef9442691fcdb00adabfc97e183ee0b0"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3796a6152c545339d3b1652183e786df648ecdf7c4f9347e1d30e6750907f5bb"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:b962700962f6e7a6bd77e5f37320cabac24b4c0f76afeac05e9f93cf0c620014"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56ea80269077003eaa59723bac1d8bacd2cd15ae30456f2890811efc1e3d4413"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75c0ebbebae71ed1e385f7dfd9b74c1cff09fed24a6df43d326dd7f12339ec34"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:252851b38bad3bfda47b104ffd077d4f9604a10cb06fe09d020016a25107bf98"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:6656a0ae383d8cd7cc94e91de4e526407b3726049ce8d7939049cbfa426518c8"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d9140ded382a5b04a1c030b593ed9bf3088243a0a8b7fa9f071a5736498c5483"}, - {file = "pydantic_core-2.6.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d38bbcef58220f9c81e42c255ef0bf99735d8f11edef69ab0b499da77105158a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c9d469204abcca28926cbc28ce98f28e50e488767b084fb3fbdf21af11d3de26"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:48c1ed8b02ffea4d5c9c220eda27af02b8149fe58526359b3c07eb391cb353a2"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b2b1bfed698fa410ab81982f681f5b1996d3d994ae8073286515ac4d165c2e7"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf9d42a71a4d7a7c1f14f629e5c30eac451a6fc81827d2beefd57d014c006c4a"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4292ca56751aebbe63a84bbfc3b5717abb09b14d4b4442cc43fd7c49a1529efd"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7dc2ce039c7290b4ef64334ec7e6ca6494de6eecc81e21cb4f73b9b39991408c"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:615a31b1629e12445c0e9fc8339b41aaa6cc60bd53bf802d5fe3d2c0cda2ae8d"}, - {file = "pydantic_core-2.6.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1fa1f6312fb84e8c281f32b39affe81984ccd484da6e9d65b3d18c202c666149"}, - {file = "pydantic_core-2.6.3.tar.gz", hash = "sha256:1508f37ba9e3ddc0189e6ff4e2228bd2d3c3a4641cbe8c07177162f76ed696c7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"}, + {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"}, + {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", 
hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"}, + {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"}, + {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"}, + {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"}, + {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"}, + {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"}, + {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"}, + {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"}, + {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"}, + {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"}, + {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"}, + {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = 
"sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"}, + {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"}, + {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"}, + {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"}, + {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"}, + {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"}, + {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"}, + {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"}, + {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"}, + {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"}, + {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", 
hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"}, + {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"}, + {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"}, ] [package.dependencies] typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" +[[package]] +name = "pyproject-api" +version = "1.9.0" +description = "API to interact with the python pyproject.toml based projects" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pyproject_api-1.9.0-py3-none-any.whl", hash = "sha256:326df9d68dea22d9d98b5243c46e3ca3161b07a1b9b18e213d1e24fd0e605766"}, + {file = "pyproject_api-1.9.0.tar.gz", hash = "sha256:7e8a9854b2dfb49454fae421cb86af43efbb2b2454e5646ffb7623540321ae6e"}, +] + +[package.dependencies] +packaging = ">=24.2" +tomli = {version = ">=2.2.1", markers = "python_version < \"3.11\""} + +[package.extras] +docs = ["furo (>=2024.8.6)", "sphinx-autodoc-typehints (>=3)"] +testing = ["covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "setuptools (>=75.8)"] + +[[package]] +name = "pytest" +version = "8.3.4" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6"}, + {file = "pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=1.5,<2" +tomli = {version = ">=1", markers = "python_version < \"3.11\""} + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.24.0" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"}, + {file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-cov" +version = "6.0.0" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pytest-cov-6.0.0.tar.gz", hash = "sha256:fde0b595ca248bb8e2d76f020b465f3b107c9632e6a1d1705f17834c89dcadc0"}, + {file = "pytest_cov-6.0.0-py3-none-any.whl", hash = "sha256:eee6f1b9e61008bd34975a4d5bab25801eb31898b032dd55addc93e96fcaaa35"}, +] + +[package.dependencies] +coverage = {version = ">=7.5", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1591,24 +1851,6 @@ files = [ [package.dependencies] requests = ">=2.0.1,<3.0.0" -[[package]] -name = "rfc3986" -version = "1.5.0" -description = "Validating URI References per RFC 3986" -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "rfc3986-1.5.0-py2.py3-none-any.whl", hash = "sha256:a86d6e1f5b1dc238b218b012df0aa79409667bb209e58da56d0b94704e712a97"}, - {file = "rfc3986-1.5.0.tar.gz", hash = "sha256:270aaf10d87d0d4e095063c65bf3ddbc6ee3d0b226328ce21e036f946e421835"}, -] - -[package.dependencies] -idna = {version = "*", optional = true, markers = "extra == \"idna2008\""} - -[package.extras] -idna2008 = ["idna"] - [[package]] name = "semver" version = "2.13.0" @@ -1685,22 +1927,22 @@ deprecation = "*" [[package]] name = "starlette" -version = "0.27.0" +version = "0.45.3" description = "The little ASGI library that shines." optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, - {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, + {file = "starlette-0.45.3-py3-none-any.whl", hash = "sha256:dfb6d332576f136ec740296c7e8bb8c8a7125044e7c6da30744718880cdd059d"}, + {file = "starlette-0.45.3.tar.gz", hash = "sha256:2cbcba2a75806f8a41c722141486f37c28e30a0921c5f6fe4346cb0dcee1302f"}, ] [package.dependencies] -anyio = ">=3.4.0,<5" +anyio = ">=3.6.2,<5" typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""} [package.extras] -full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] [[package]] name = "toml" @@ -1714,25 +1956,96 @@ files = [ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] +[[package]] +name = "tomli" +version = "2.2.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.8" +groups = ["main"] +markers = "python_full_version <= \"3.11.0a6\"" +files = [ + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file 
= "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, +] + +[[package]] +name = "tox" +version = "4.24.1" +description = "tox is a generic virtualenv management and test command line tool" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "tox-4.24.1-py3-none-any.whl", hash = "sha256:57ba7df7d199002c6df8c2db9e6484f3de6ca8f42013c083ea2d4d1e5c6bdc75"}, + {file = "tox-4.24.1.tar.gz", hash = "sha256:083a720adbc6166fff0b7d1df9d154f9d00bfccb9403b8abf6bc0ee435d6a62e"}, +] + +[package.dependencies] +cachetools = ">=5.5" +chardet = ">=5.2" +colorama = ">=0.4.6" +filelock = ">=3.16.1" +packaging = ">=24.2" +platformdirs = ">=4.3.6" +pluggy = ">=1.5" +pyproject-api = ">=1.8" +tomli = {version = ">=2.1", markers = "python_version < \"3.11\""} +typing-extensions = {version = ">=4.12.2", markers = "python_version < \"3.11\""} +virtualenv = ">=20.27.1" + +[package.extras] +test = ["devpi-process (>=1.0.2)", "pytest (>=8.3.3)", "pytest-mock (>=3.14)"] + [[package]] name = "trio" -version = "0.18.0" +version = "0.28.0" description = "A friendly Python library for async concurrency and I/O" optional = false -python-versions = ">=3.6" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "trio-0.18.0-py3-none-any.whl", hash = "sha256:a42af0634ba729cbfe8578be058750c6471dac19fbc7167ec6a3ca3f966fb424"}, - {file = "trio-0.18.0.tar.gz", hash = "sha256:87a66ae61f27fe500c9024926a9ba482c07e1e0f56380b70a264d19c435ba076"}, + {file = "trio-0.28.0-py3-none-any.whl", hash = "sha256:56d58977acc1635735a96581ec70513cc781b8b6decd299c487d3be2a721cd94"}, + {file = "trio-0.28.0.tar.gz", hash = "sha256:4e547896fe9e8a5658e54e4c7c5fa1db748cbbbaa7c965e7d40505b928c73c05"}, ] [package.dependencies] -async-generator = ">=1.9" -attrs = ">=19.2.0" +attrs = ">=23.2.0" cffi = {version = ">=1.14", markers = "os_name == \"nt\" and implementation_name != \"pypy\""} +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} idna = "*" outcome = "*" -sniffio = "*" +sniffio = ">=1.3.0" sortedcontainers = "*" [[package]] @@ -1766,48 +2079,96 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "uvicorn" -version = "0.14.0" +version = "0.34.0" description = "The lightning-fast ASGI server." 
optional = false -python-versions = "*" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "uvicorn-0.14.0-py3-none-any.whl", hash = "sha256:2a76bb359171a504b3d1c853409af3adbfa5cef374a4a59e5881945a97a93eae"}, - {file = "uvicorn-0.14.0.tar.gz", hash = "sha256:45ad7dfaaa7d55cab4cd1e85e03f27e9d60bc067ddc59db52a2b0aeca8870292"}, + {file = "uvicorn-0.34.0-py3-none-any.whl", hash = "sha256:023dc038422502fa28a09c7a30bf2b6991512da7dcdb8fd35fe57cfc154126f4"}, + {file = "uvicorn-0.34.0.tar.gz", hash = "sha256:404051050cd7e905de2c9a7e61790943440b3416f49cb409f965d9dcd0fa73e9"}, ] [package.dependencies] -asgiref = ">=3.3.4" -click = ">=7" +click = ">=7.0" h11 = ">=0.8" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} [package.extras] -standard = ["PyYAML (>=5.1)", "colorama (>=0.4)", "httptools (==0.2.*)", "python-dotenv (>=0.13)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchgod (>=0.6)", "websockets (>=9.1)"] +standard = ["colorama (>=0.4)", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] [[package]] name = "uvloop" -version = "0.15.3" +version = "0.21.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8.0" +groups = ["main"] +files = [ + {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ec7e6b09a6fdded42403182ab6b832b71f4edaf7f37a9a0e371a01db5f0cb45f"}, + {file = "uvloop-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:196274f2adb9689a289ad7d65700d37df0c0930fd8e4e743fa4834e850d7719d"}, + {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f38b2e090258d051d68a5b14d1da7203a3c3677321cf32a95a6f4db4dd8b6f26"}, + {file = "uvloop-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c43e0f13022b998eb9b973b5e97200c8b90823454d4bc06ab33829e09fb9bb"}, + {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:10d66943def5fcb6e7b37310eb6b5639fd2ccbc38df1177262b0640c3ca68c1f"}, + {file = "uvloop-0.21.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:67dd654b8ca23aed0a8e99010b4c34aca62f4b7fce88f39d452ed7622c94845c"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2"}, + {file = 
"uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553"}, + {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:17df489689befc72c39a08359efac29bbee8eee5209650d4b9f34df73d22e414"}, + {file = "uvloop-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bc09f0ff191e61c2d592a752423c767b4ebb2986daa9ed62908e2b1b9a9ae206"}, + {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f0ce1b49560b1d2d8a2977e3ba4afb2414fb46b86a1b64056bc4ab929efdafbe"}, + {file = "uvloop-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e678ad6fe52af2c58d2ae3c73dc85524ba8abe637f134bf3564ed07f555c5e79"}, + {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:460def4412e473896ef179a1671b40c039c7012184b627898eea5072ef6f017a"}, + {file = "uvloop-0.21.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:10da8046cc4a8f12c91a1c39d1dd1585c41162a15caaef165c2174db9ef18bdc"}, + {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c097078b8031190c934ed0ebfee8cc5f9ba9642e6eb88322b9958b649750f72b"}, + {file = "uvloop-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:46923b0b5ee7fc0020bef24afe7836cb068f5050ca04caf6b487c513dc1a20b2"}, + {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53e420a3afe22cdcf2a0f4846e377d16e718bc70103d7088a4f7623567ba5fb0"}, + {file = "uvloop-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88cb67cdbc0e483da00af0b2c3cdad4b7c61ceb1ee0f33fe00e09c81e3a6cb75"}, + {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:221f4f2a1f46032b403bf3be628011caf75428ee3cc204a22addf96f586b19fd"}, + {file = "uvloop-0.21.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2d1f581393673ce119355d56da84fe1dd9d2bb8b3d13ce792524e1607139feff"}, + {file = "uvloop-0.21.0.tar.gz", hash = "sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3"}, +] + +[package.extras] +dev = ["Cython 
(>=3.0,<4.0)", "setuptools (>=60)"] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["aiohttp (>=3.10.5)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + +[[package]] +name = "virtualenv" +version = "20.29.1" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" groups = ["main"] files = [ - {file = "uvloop-0.15.3-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:e71fb9038bfcd7646ca126c5ef19b17e48d4af9e838b2bcfda7a9f55a6552a32"}, - {file = "uvloop-0.15.3-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7522df4e45e4f25b50adbbbeb5bb9847495c438a628177099d2721f2751ff825"}, - {file = "uvloop-0.15.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae2b325c0f6d748027f7463077e457006b4fdb35a8788f01754aadba825285ee"}, - {file = "uvloop-0.15.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:0de811931e90ae2da9e19ce70ffad73047ab0c1dba7c6e74f9ae1a3aabeb89bd"}, - {file = "uvloop-0.15.3-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7f4b8a905df909a407c5791fb582f6c03b0d3b491ecdc1cdceaefbc9bf9e08f6"}, - {file = "uvloop-0.15.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d8ffe44ae709f839c54bacf14ed283f41bee90430c3b398e521e10f8d117b3a"}, - {file = "uvloop-0.15.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:63a3288abbc9c8ee979d7e34c34e780b2fbab3e7e53d00b6c80271119f277399"}, - {file = "uvloop-0.15.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5cda65fc60a645470b8525ce014516b120b7057b576fa876cdfdd5e60ab1efbb"}, - {file = "uvloop-0.15.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ff05116ede1ebdd81802df339e5b1d4cab1dfbd99295bf27e90b4cec64d70e9"}, - {file = "uvloop-0.15.3.tar.gz", hash = "sha256:905f0adb0c09c9f44222ee02f6b96fd88b493478fffb7a345287f9444e926030"}, + {file = "virtualenv-20.29.1-py3-none-any.whl", hash = "sha256:4e4cb403c0b0da39e13b46b1b2476e505cb0046b25f242bee80f62bf990b2779"}, + {file = "virtualenv-20.29.1.tar.gz", hash = "sha256:b8b8970138d32fb606192cb97f6cd4bb644fa486be9308fb9b63f81091b5dc35"}, ] +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + [package.extras] -dev = ["Cython (>=0.29.20,<0.30.0)", "Sphinx (>=1.7.3,<1.8.0)", "aiohttp", "flake8 (>=3.8.4,<3.9.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=19.0.0,<19.1.0)", "pycodestyle (>=2.6.0,<2.7.0)", "pytest (>=3.6.0)", "sphinx-rtd-theme (>=0.2.4,<0.3.0)", "sphinxcontrib-asyncio (>=0.2.0,<0.3.0)"] -docs = ["Sphinx (>=1.7.3,<1.8.0)", "sphinx-rtd-theme (>=0.2.4,<0.3.0)", "sphinxcontrib-asyncio (>=0.2.0,<0.3.0)"] -test = ["aiohttp", "flake8 (>=3.8.4,<3.9.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=19.0.0,<19.1.0)", "pycodestyle (>=2.6.0,<2.7.0)"] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] [[package]] name = "vyper-config" @@ -1884,84 +2245,91 @@ files = [ [[package]] name = "wrapt" 
-version = "1.17.1" +version = "1.17.2" description = "Module for decorators, wrappers and monkey patching." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "wrapt-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9176057c60438c2ce2284cdefc2b3ee5eddc8c87cd6e24c558d9f5c64298fa4a"}, - {file = "wrapt-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e0f0e731e0ca1583befd3af71b9f90d64ded1535da7b80181cb9e907cc10bbae"}, - {file = "wrapt-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:144ed42a4ec3aca5d6f1524f99ee49493bbd0d9c66c24da7ec44b4661dca4dcc"}, - {file = "wrapt-1.17.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8a7b0699a381226d81d75b48ea58414beb5891ba8982bdc8e42912f766de074"}, - {file = "wrapt-1.17.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b20fcef5a3ee410671a5a59472e1ff9dda21cfbe5dfd15e23ee4b99ac455c8e"}, - {file = "wrapt-1.17.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b9a58a1cbdc0588ed4c8ab0c191002d5d831a58c3bad88523fe471ea97eaf57d"}, - {file = "wrapt-1.17.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:50bbfa7a92da7540426c774e09d6901e44d8f9b513b276ebae03ae244f0c6dbf"}, - {file = "wrapt-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:09f5141599eaf36d6cc0b760ad87c2ab6b8618d009b2922639266676775a73a6"}, - {file = "wrapt-1.17.1-cp310-cp310-win32.whl", hash = "sha256:589f24449fd58508533c4a69b2a0f45e9e3419b86b43a0607e2fdb989c6f2552"}, - {file = "wrapt-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:7eca3a1afa9820785b79cb137c68ca38c2f77cfedc3120115da42e1d5800907e"}, - {file = "wrapt-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:da0d0c1c4bd55f9ace919454776dbf0821f537b9a77f739f0c3e34b14728b3b3"}, - {file = "wrapt-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cd7649f0c493d35f9aad9790bbecd7b6fd2e2f7141f6cb1e1e9bb7a681d6d0a4"}, - {file = "wrapt-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0aad4f54b3155d673a5c4706a71a0a84f3d415b2fc8a2a399a964d70f18846a2"}, - {file = "wrapt-1.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ebea3ebb6a394f50f150a52e279508e91c8770625ac8fcb5d8cf35995a320f2"}, - {file = "wrapt-1.17.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53e2986a65eba7c399d7ad1ccd204562d4ffe6e937344fe5a49eb5a83858f797"}, - {file = "wrapt-1.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:67c30d3fe245adb0eb1061a0e526905970a0dabe7c5fba5078e0ee9d19f28167"}, - {file = "wrapt-1.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6fd88935b12b59a933ef45facb57575095f205d30d0ae8dd1a3b485bc4fa2fbd"}, - {file = "wrapt-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ec3e763e7ca8dcba0792fc3e8ff7061186f59e9aafe4438e6bb1f635a6ab0901"}, - {file = "wrapt-1.17.1-cp311-cp311-win32.whl", hash = "sha256:d792631942a102d6d4f71e4948aceb307310ac0a0af054be6d28b4f79583e0f1"}, - {file = "wrapt-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:3dfd4738a630eddfcb7ff6c8e9fe863df3821f9c991dec73821e05450074ae09"}, - {file = "wrapt-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b1a4c8edd038fee0ce67bf119b16eaa45d22a52bbaf7d0a17d2312eb0003b1bb"}, - {file = "wrapt-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:181a844005c9818792212a32e004cb4c6bd8e35cae8e97b1a39a1918d95cef58"}, - {file = "wrapt-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21ffcf16f5c243a626b0f8da637948e3d5984e3bc0c1bc500ad990e88e974e3b"}, - {file = "wrapt-1.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb33799b7582bb73787b9903b70595f8eff67eecc9455f668ed01adf53f9eea"}, - {file = "wrapt-1.17.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57e932ad1908b53e9ad67a746432f02bc8473a9ee16e26a47645a2b224fba5fd"}, - {file = "wrapt-1.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b8bd35c15bc82c5cbe397e8196fa57a17ce5d3f30e925a6fd39e4c5bb02fdcff"}, - {file = "wrapt-1.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:93018dbb956e0ad99ea2fa2c3c22f033549dcb1f56ad9f4555dfe25e49688c5d"}, - {file = "wrapt-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e5bd9186d52cf3d36bf1823be0e85297e4dbad909bc6dd495ce0d272806d84a7"}, - {file = "wrapt-1.17.1-cp312-cp312-win32.whl", hash = "sha256:d609f0ab0603bbcbf2de906b366b9f9bec75c32b4493550a940de658cc2ce512"}, - {file = "wrapt-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:2c160bb8815787646b27a0c8575a26a4d6bf6abd7c5eb250ad3f2d38b29cb2cb"}, - {file = "wrapt-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:99e544e6ce26f89ad5acc6f407bc4daf7c1d42321e836f5c768f834100bdf35c"}, - {file = "wrapt-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:78da796b74f2c8e0af021ee99feb3bff7cb46f8e658fe25c20e66be1080db4a2"}, - {file = "wrapt-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f1bc359f6c52e53565e7af24b423e7a1eea97d155f38ac9e90e95303514710b"}, - {file = "wrapt-1.17.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cbead724daa13cae46e8ab3bb24938d8514d123f34345535b184f3eb1b7ad717"}, - {file = "wrapt-1.17.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bdf7b0e3d3713331c0bb9daac47cd10e5aa60d060e53696f50de4e560bd5617f"}, - {file = "wrapt-1.17.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f17e8d926f63aed65ff949682c922f96d00f65c2e852c24272232313fa7823d5"}, - {file = "wrapt-1.17.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9e04f3bd30e0b23c0ca7e1d4084e7d28b6d7d2feb8b7bc69b496fe881280579b"}, - {file = "wrapt-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5660e470edfa15ae7ef407272c642d29e9962777a6b30bfa8fc0da2173dc9afd"}, - {file = "wrapt-1.17.1-cp313-cp313-win32.whl", hash = "sha256:a992f9e019145e84616048556546edeaba68e05e1c1ffbe8391067a63cdadb0c"}, - {file = "wrapt-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:5c2e24ba455af4b0a237a890ea6ed9bafd01fac2c47095f87c53ea3344215d43"}, - {file = "wrapt-1.17.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:88623fd957ba500d8bb0f7427a76496d99313ca2f9e932481c0882e034cf1add"}, - {file = "wrapt-1.17.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:162d5f15bdd3b8037e06540902227ef9e0f298496c0afaadd9e2875851446693"}, - {file = "wrapt-1.17.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb82447ddae4e3d9b51f40c494f66e6cbd8fb0e8e8b993678416535c67f9a0d"}, - {file = "wrapt-1.17.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6ce4cff3922707048d754e365c4ebf41a3bcbf29b329349bf85d51873c7c7e9e"}, - {file = "wrapt-1.17.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fdc4e73a3fa0c25eed4d836d9732226f0326957cb075044a7f252b465299433"}, - {file = "wrapt-1.17.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:bca1c0824f824bcd97b4b179dd55dcad1dab419252be2b2faebbcacefa3b27b2"}, - {file = "wrapt-1.17.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:6d44b14f3a2f6343a07c90344850b7af5515538ce3a5d01f9c87d8bae9bd8724"}, - {file = "wrapt-1.17.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:169033329022739c6f0d8cd3031a113953b0ba500f3d5978904bdd40baec4568"}, - {file = "wrapt-1.17.1-cp313-cp313t-win32.whl", hash = "sha256:52f0907287d9104112dbebda46af4db0793fcc4c64c8a867099212d116b6db64"}, - {file = "wrapt-1.17.1-cp313-cp313t-win_amd64.whl", hash = "sha256:7966f98fa36933333d8a1c3d8552aa3d0735001901a4aabcfbd5a502b4ef14fe"}, - {file = "wrapt-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:27a49f217839bf559d436308bae8fc4a9dd0ac98ffdb9d6aeb3f00385b0fb72c"}, - {file = "wrapt-1.17.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:50a4e3b45e62b1ccb96b3fc0e427f1b458ff2e0def34ae084de88418157a09d1"}, - {file = "wrapt-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30c0c08434fe2af6e40c5c75c036d7e3c7e7f499079fc479e740d9586b09fb0d"}, - {file = "wrapt-1.17.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15f96fe5e2efdc613983327240ae89cf6368c07eeb0f194d240e9549aa1ea739"}, - {file = "wrapt-1.17.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14f78f8c313884f889c6696af62aa881af302a989a7c0df398d2b541fa53e8a9"}, - {file = "wrapt-1.17.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d87334b521ab0e2564902c0b10039dee8670485e9d397fe97c34b88801f474f7"}, - {file = "wrapt-1.17.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:97eaff096fcb467e0f486f3bf354c1072245c2045859d71ba71158717ec97dcc"}, - {file = "wrapt-1.17.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:13887d1415dc0e213a9adeb9026ae1f427023f77110d988fbd478643490aa40c"}, - {file = "wrapt-1.17.1-cp38-cp38-win32.whl", hash = "sha256:823a262d967cbdf835787039b873ff551e36c14658bdc2e43267968b67f61f88"}, - {file = "wrapt-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:889587664d245dae75c752b643061f922e8a590d43a4cd088eca415ca83f2d13"}, - {file = "wrapt-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:997e8f9b984e4263993d3baf3329367e7c7673b63789bc761718a6f9ed68653d"}, - {file = "wrapt-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bac64f57a5a7926ebc9ab519fb9eba1fc6dcd1f65d7f45937b2ce38da65c2270"}, - {file = "wrapt-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7aa07603d67007c15b33d20095cc9276f3e127bfb1b8106b3e84ec6907d137e"}, - {file = "wrapt-1.17.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c53ef8936c4d587cb96bb1cf0d076e822fa38266c2b646837ef60465da8db22e"}, - {file = "wrapt-1.17.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e72053cc4706dac537d5a772135dc3e1de5aff52883f49994c1757c1b2dc9db2"}, - {file = "wrapt-1.17.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0ee037e4cc9d039efe712b13c483f4efa2c3499642369e01570b3bb1842eea3f"}, - {file = 
"wrapt-1.17.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20888d886186d19eab53816db2e615950b1ce7dbd5c239107daf2c8a6a4a03c6"}, - {file = "wrapt-1.17.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1c119802ae432b8c5d55dd5253825d09c1dca1c97ffc7b32c53ecdb348712f64"}, - {file = "wrapt-1.17.1-cp39-cp39-win32.whl", hash = "sha256:3260178f3bc006acae93378bfd6dbf33c9249de93cc1b78d8cc7b7416f4ea99a"}, - {file = "wrapt-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:18fb16fb6bb75f4ec6272829007f3129a9a5264d0230372f9651e5f75cfec552"}, - {file = "wrapt-1.17.1-py3-none-any.whl", hash = "sha256:f3117feb1fc479eaf84b549d3f229d5d2abdb823f003bc2a1c6dd70072912fa0"}, - {file = "wrapt-1.17.1.tar.gz", hash = "sha256:16b2fdfa09a74a3930175b6d9d7d008022aa72a4f02de2b3eecafcc1adfd3cfe"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, + {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, + {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, + {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, + {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, + {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, + {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, + {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, + {file = 
"wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, + {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, + {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, + {file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, + {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, + {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, + {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, + {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, + {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, + {file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, + {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, + {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, + {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, + {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, + {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, + {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, + {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, + {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, + {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, + {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, + {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, + {file = 
"wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, + {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, + {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, + {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, + {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, + {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, + {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, + {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, + {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, + {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, + {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, ] [[package]] @@ -2064,4 +2432,4 @@ propcache = ">=0.2.0" [metadata] lock-version = "2.1" python-versions = "^3.9" -content-hash = "6c4ce480ad4e6f2cf11ec7e00e3758302c338f957e4db67026a206198ebb7cd7" +content-hash = "ff6025fa673bd842cfaa048f95c45903b60f8158a43d5ff61de49e4ed167b351" diff --git a/backend/pyproject.toml b/backend/pyproject.toml index 1db835c1..b2f64d8e 100644 --- a/backend/pyproject.toml +++ b/backend/pyproject.toml @@ -1,32 +1,118 @@ [project] name = "openshift_perfscale_api" +requires-python = "^3.9" +dynamic = ["dependencies"] version = "0.1.1" description = "Python transformer of OpenShift performance and scale test results" authors = [{name = "mleader", email = "mleader@redhat.com"}] +[tool.poetry] +packages = [ + { include = "app" } +] + [tool.poetry.dependencies] -aiohttp = "^3.7.4" -atlassian-python-api = "^3.41.9" +aiohttp = "^3.11.11" +atlassian-python-api = "^3.41.9.20" cryptography = "^3.4.8" elasticsearch = "7.13.4" -fastapi = "^0.104.1" -httptools = "^0.2.0" -httpx = "^0.18.1" +fastapi = "^0.115.6" +httptools = "^0.6.4" +httpx = "^0.28.1" 
+numpy = "1.26.4" orjson = "^3.5.3" pandas = "1.2.4" -pydantic = "2.3.0" +pydantic = "2.10.5" python = "^3.9" python-keycloak = "^3.12.0" +pytest = "^8.3.4" +pytest-asyncio = "^0.24" +pytest-cov = "^6.0" semver = "2.13.0" splunk-sdk = "2.0.1" -trio = "^0.18.0" -uvicorn = "^0.14.0" -uvloop = "^0.15.2" +tox = "^4.23.2" +trio = "^0.28.0" +uvicorn = "^0.34.0" +uvloop = "^0.21.0" vyper-config = "1.0.0" [tool.poetry.group.dev.dependencies] watchgod = "^0.7" +[tool.pytest.ini_options] +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" + +[tool.isort] +profile = "black" # black-compatible (e.g., trailing comma) +known_first_party = ["app"] # separate our headers into a section +multi_line_output = 3 # "hanging" indent with dedented paren +force_sort_within_sections = true # don't separate import vs from +order_by_type = false # sort alphabetic regardless of case + +[tool.tox] +requires = ["tox>=4.23.2"] +env_list = ["functional", "unit", "format", "lint", "isort"] + +[tool.tox.env_run_base] +description = "Run test under {base_python}" +base_python = ["python3.9"] +deps = [ + "pytest", + "pytest-asyncio", + "pytest-cov", + "coverage", +] + +[tool.tox.env.unit] +set_env.COVERAGE = { replace = "env", name = "COVERAGE", default = "/var/tmp/{env:USER}" } +allowlist_externals = ["bash", "echo", "coverage"] +commands = [ + ["echo", "{env:COVERAGE}"], + ["pip", "list"], + ["pytest", "-s", "--cov-branch", "--cov=app", "{posargs}", "tests/unit"], + ["coverage", "html", "--directory={env:COVERAGE}/html"], + ["bash", "-c", "coverage report --format=markdown >{env:COVERAGE}/coverage.txt"], +] + +[tool.tox.env.functional] +requires = ["requests"] +set_env.SERVER = { replace = "env", name = "SERVER", default = "http://localhost:8000" } +allowlist_externals = ["echo", "tests/functional/setup/test.sh"] +commands = [ + ["echo", "functional tests against {env:SERVER}"], + ["tests/functional/setup/test.sh"] +] + +[tool.tox.env.format] +description = "check code format" +skip_install = true +deps = ["black"] +commands = [["black", "--check", { replace = "posargs", default = ["app", "tests"], extend = true} ]] + +[tool.tox.env.isort] +description = "check order of imports" +skip_install = true +deps = ["isort"] +commands = [["isort", "--check", { replace = "posargs", default = ["app", "tests"], extend = true} ]] + +[tool.tox.env.lint] +description = "check code" +skip_install = true +deps = ["flake8"] +commands = [["flake8", { replace = "posargs", default = ["app", "tests"], extend = true} ]] + +[tool.coverage.run] +branch = true +cover_pylib = true +data_file = "${COVERAGE}/coverage.db" +parallel = true +relative_files = true + +[tool.coverage.report] +include_namespace_packages = true +skip_empty = true + [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" diff --git a/backend/skeleton.toml b/backend/skeleton.toml index 81662a55..2aac4574 100644 --- a/backend/skeleton.toml +++ b/backend/skeleton.toml @@ -15,3 +15,8 @@ personal_access_token= url= username= password= + +[crucible] +url= +username= +password= diff --git a/backend/tests/__init__.py b/backend/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/backend/tests/functional/__init__.py b/backend/tests/functional/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/backend/tests/functional/conftest.py b/backend/tests/functional/conftest.py new file mode 100644 index 00000000..331a2693 --- /dev/null +++ b/backend/tests/functional/conftest.py @@ -0,0 +1,49 @@ 
+#!/usr/bin/env python3
+"""Set up Opensearch for functional tests
+
+The functional test Opensearch instance has a "golden" snapshot named
+"base" in a snapshot repository called "functional", backed by the
+designated snapshot directory. We need to call Opensearch APIs to
+register that snapshot repository and restore the snapshot. Since
+Opensearch is started in an isolated pod, we can't script this from
+outside the pod; so we run this little Python program inside the pod.
+"""
+import os
+import time
+import pytest
+from elasticsearch import Elasticsearch
+
+
+@pytest.fixture(scope="session")
+def server():
+    server = os.getenv("SERVER")
+    assert server, "SERVER environment variable must be set"
+    return server
+
+
+@pytest.fixture(scope="session", autouse=True)
+def restore_snapshot():
+    ok = False
+    start = time.time()
+    while not ok:
+        try:
+            db = Elasticsearch("http://localhost:9200")
+            r = db.indices.get("*")
+            ok = True
+        except Exception as exc:
+            print(f"Opensearch isn't ready: {str(exc)!r}")
+            time.sleep(5)
+    print(f"Opensearch ready after {time.time()-start:.3f} seconds")
+    cdm = {i for i in r.keys() if i.startswith("cdmv")}
+    if cdm:
+        print(f"CDM indices appear to be available: {','.join(cdm)}")
+    else:
+        # Opensearch hasn't been loaded yet, so restore the snapshot
+        print("Restoring 'base' snapshot...")
+        r = db.snapshot.create_repository(repository="functional", body={"type": "fs", "settings": {"location": "/var/tmp/snapshot"}})
+        assert r.get("acknowledged") is True
+        r = db.snapshot.get(repository="functional", snapshot="base")
+        # We expect one snapshot, named "base"
+        assert r["snapshots"][0]["snapshot"] == "base"
+        r = db.snapshot.restore(repository="functional", snapshot="base", body={"indices": "cdmv*dev-*"}, wait_for_completion=True)
+        assert r["snapshot"]["shards"]["failed"] == 0
diff --git a/backend/tests/functional/setup/funcconfig.toml b/backend/tests/functional/setup/funcconfig.toml
new file mode 100644
index 00000000..536c8674
--- /dev/null
+++ b/backend/tests/functional/setup/funcconfig.toml
@@ -0,0 +1,66 @@
+[ocp.elasticsearch]
+url = ""
+indice = ""
+username = ""
+password = ""
+
+[ocp.elasticsearch.internal]
+url = ''
+prefix = ''
+indice = ''
+username = ''
+password = ''
+
+[hce.elasticsearch]
+url = ""
+indice = ""
+
+[quay.elasticsearch]
+url=''
+indice=''
+username=''
+password=','
+
+[telco.config]
+job_url = ''
+
+[telco.splunk]
+host = ''
+port = 8089
+indice = ''
+username = ''
+password = ''
+
+[airflow]
+url = ""
+username = ""
+password = ""
+
+[ocp-server]
+port = 8000
+
+[ocm.elasticsearch]
+url = ""
+indice = ""
+username = ""
+password = ""
+
+[elasticsearch]
+  url = ""
+  indice = ""
+  username = ""
+  password = ""
+
+[jira]
+  url = ""
+  personal_access_token = ""
+
+[horreum]
+  url = ""
+  username = ""
+  password = ""
+
+[ilab.crucible]
+  url = "http://localhost:9200"
+  username = ""
+  password = ""
diff --git a/backend/tests/functional/setup/functional.containerfile b/backend/tests/functional/setup/functional.containerfile
new file mode 100644
index 00000000..b7a51322
--- /dev/null
+++ b/backend/tests/functional/setup/functional.containerfile
@@ -0,0 +1,26 @@
+FROM quay.io/centos/centos:stream9
+
+ENV PATH=/root/.local/bin:$PATH \
+    LANG=C.UTF-8 \
+    PYTHONPATH=/backend
+
+RUN mkdir -p /backend/tests/functional
+
+COPY tests/functional/*.py /backend/tests/functional/
+COPY pyproject.toml /backend
+COPY poetry.lock /backend
+
+WORKDIR /backend
+
+RUN dnf install -y pip gcc python3-devel gcc-c++
+
+RUN pip install --user poetry && \
poetry-plugin-export && \ + poetry export -f requirements.txt -o requirements.txt && \ + pip install -U typing-extensions && \ + pip install --user dash && \ + pip install --no-cache-dir -r requirements.txt + +ENV SERVER=http://localhost:8000 + +ENTRYPOINT ["pytest", "-s", "tests/functional"] diff --git a/backend/tests/functional/setup/opensearch.sh b/backend/tests/functional/setup/opensearch.sh new file mode 100755 index 00000000..f7378a43 --- /dev/null +++ b/backend/tests/functional/setup/opensearch.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -ex # Fail on error +BRANCH=$(git rev-parse --show-toplevel) +SETUP=${BRANCH}/backend/tests/functional/setup + +if [[ -n "${POD_NAME}" ]] ;then + echo "Running in POD ${POD_NAME}" + POD="--pod ${POD_NAME}" + PORTS= + NAME="${POD_NAME}-opensearch" +else + POD="" + PORTS="-p 9200:9200 -p 9600:9600" + NAME="opensearch" +fi + +podman run -d ${POD} --name "${NAME}" \ + -v "${SETUP}"/opensearch.yml:/usr/share/opensearch/config/opensearch.yml:z \ + -v "${SETUP}"/snapshot.tar.gz:/var/tmp/snapshot.tar.gz:z \ + ${PORTS} \ + -e "discovery.type=single-node" -e "DISABLE_INSTALL_DEMO_CONFIG=true" \ + -e "DISABLE_SECURITY_PLUGIN=true" \ + docker.io/opensearchproject/opensearch:latest +echo "Unpacking snapshot inside container" +podman exec "${NAME}" bash -c 'cd /var/tmp ; tar xfz snapshot.tar.gz' +echo "Done" diff --git a/backend/tests/functional/setup/opensearch.yml b/backend/tests/functional/setup/opensearch.yml new file mode 100755 index 00000000..d6286dcd --- /dev/null +++ b/backend/tests/functional/setup/opensearch.yml @@ -0,0 +1,17 @@ +--- +cluster.name: docker-cluster + +# Bind to all interfaces because we don't know what IP address Docker will assign to us. +network.host: 0.0.0.0 + +# # minimum_master_nodes need to be explicitly set when bound on a public IP +# # set to 1 to allow single node clusters +# discovery.zen.minimum_master_nodes: 1 + +# Setting network.host to a non-loopback address enables the annoying bootstrap checks. "Single-node" mode disables them again. +# discovery.type: single-node + +reindex.remote.allowlist: ["n42-h01-b01-mx750c.rdu3.labs.perfscale.redhat.com:9200"] + +# Register the mapped snapshot repository +path.repo: ["/var/tmp/snapshot"] diff --git a/backend/tests/functional/setup/snapshot.tar.gz b/backend/tests/functional/setup/snapshot.tar.gz new file mode 100755 index 00000000..4708c353 Binary files /dev/null and b/backend/tests/functional/setup/snapshot.tar.gz differ diff --git a/backend/tests/functional/setup/test.sh b/backend/tests/functional/setup/test.sh new file mode 100755 index 00000000..562ab6c2 --- /dev/null +++ b/backend/tests/functional/setup/test.sh @@ -0,0 +1,31 @@ +#!/bin/bash +set -ex + +cleanup () { + set +e + echo "Cleaning up..." 
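+    # "set +e" above keeps cleanup going even if some of these objects
+    # were never created (e.g., when an earlier build step failed).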
+    podman pod stop "${POD_NAME}"
+    podman rm "${POD_NAME}-func" "${POD_NAME}-front" "${POD_NAME}-back" "${POD_NAME}-opensearch"
+    podman pod rm "${POD_NAME}"
+}
+
+BRANCH="$(git rev-parse --show-toplevel)"
+BACKEND="${BRANCH}/backend"
+FRONTEND="${BRANCH}/frontend"
+SETUP="${BACKEND}"/tests/functional/setup
+CPT_CONFIG=${CPT_CONFIG:-"${SETUP}/funcconfig.toml"}
+export POD_NAME=${POD_NAME:-FUNC${RANDOM}}
+
+podman pod create "${POD_NAME}"
+trap cleanup EXIT
+
+podman build -f backend.containerfile --tag backend "${BACKEND}"
+podman build -f frontend.containerfile --tag frontend "${FRONTEND}"
+podman build -f tests/functional/setup/functional.containerfile --tag functional "${BACKEND}"
+
+POD="--pod ${POD_NAME}"
+
+"${SETUP}"/opensearch.sh
+podman run -d ${POD} --name="${POD_NAME}-back" -v "${CPT_CONFIG}:/backend/ocpperf.toml:Z" localhost/backend
+podman run -d ${POD} --name="${POD_NAME}-front" localhost/frontend
+podman run ${POD} --name="${POD_NAME}-func" localhost/functional
diff --git a/backend/tests/functional/test_run.py b/backend/tests/functional/test_run.py
new file mode 100644
index 00000000..1826a225
--- /dev/null
+++ b/backend/tests/functional/test_run.py
@@ -0,0 +1,120 @@
+from dataclasses import dataclass
+from datetime import datetime, timezone
+
+import requests
+import pytest
+
+
+@dataclass
+class Run:
+    id: str
+    start: int
+    metrics: list[str]
+    iterations: int
+
+
+# The "anointed" functional test Opensearch contains 5 ilab benchmark results.
+# These are in (begin) timestamp order for comparison purposes.
+RUNS: list[Run] = [
+    Run(
+        "26ad48c1-fc9c-404d-bccf-d19755ca8a39",
+        1726165775123,
+        ["ilab::sdg-samples-sec"],
+        5,
+    ),
+    Run(
+        "1878b7d2-9195-4104-8bd0-13d31a8f5524",
+        1730136441057,
+        ["ilab::actual-train-seconds"],
+        1,
+    ),
+    Run(
+        "1c756d89-3aef-445b-b71b-99b8916f8537",
+        1730212256723,
+        ["ilab::actual-train-seconds"],
+        1,
+    ),
+    Run(
+        "20a15c35-22ac-482f-b209-4b8ecc018a26",
+        1732201767072,
+        ["ilab::actual-train-seconds", "ilab::sdg-samples-sec"],
+        2,
+    ),
+    Run(
+        "0d78ece9-817c-41aa-be19-e90d53924206",
+        1732208487941,
+        ["ilab::actual-train-seconds", "ilab::sdg-samples-sec"],
+        2,
+    ),
+]
+
+
+class TestRun:
+
+    def test_get_runs(self, server):
+
+        # Get all runs, regardless of date
+        response = requests.get(f"{server}/api/v1/ilab/runs", params={"all": "true"})
+        assert response.status_code == 200
+        result = response.json()
+
+        # Our test database has 5 runs
+        assert result["total"] == 5
+
+        # We should get the expected run IDs
+        assert {r["id"] for r in result["results"]} == {e.id for e in RUNS}
+
+        # All runs in the test database are from the "ilab" benchmark
+        assert {r["benchmark"] for r in result["results"]} == {"ilab"}
+
+        # All Crucible runs "passed"
+        assert {r["status"] for r in result["results"]} == {"pass"}
+
+    @pytest.mark.parametrize("start,end", ((None, 2), (2, None), (2, 4)))
+    def test_date_filter(self, server, start, end):
+        """Test that date filters work"""
+        sdate = None
+        edate = None
+        first = 0
+        last = 5
+
+        if start:
+            first = start
+            stime = datetime.fromtimestamp(RUNS[start].start / 1000, timezone.utc)
+            sdate = f"{stime:%Y-%m-%dT%H:%M:%S}"
+        if end:
+            last = end
+            etime = datetime.fromtimestamp(RUNS[end].start / 1000, tz=timezone.utc)
+            edate = f"{etime:%Y-%m-%dT%H:%M:%S}"
+        response = requests.get(
+            f"{server}/api/v1/ilab/runs", {"start_date": sdate, "end_date": edate}
+        )
+        result = response.json()
+        ids = {r.id for r in RUNS[first:last]}
+        assert result["total"] == len(ids)
+        assert {r["id"] for r in result["results"]} == ids
result["results"]} == ids + + def test_pagination(self, server): + response = requests.get( + f"{server}/api/v1/ilab/runs", {"all": "true", "size": 3, "offset": 1} + ) + result = response.json() + assert result["total"] == 5 + assert result["count"] == 3 + assert {r["id"] for r in result["results"]} == { + RUNS[1].id, + RUNS[2].id, + RUNS[3].id, + } + assert result["next_offset"] == 4 + + response = requests.get( + f"{server}/api/v1/ilab/runs", + {"all": "true", "size": 3, "offset": result["next_offset"]}, + ) + result = response.json() + assert result["total"] == 5 + assert result["count"] == 1 + assert result["results"][0]["id"] == RUNS[4].id + assert "next_offset" not in result diff --git a/backend/tests/unit/__init__.py b/backend/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/backend/tests/unit/conftest.py b/backend/tests/unit/conftest.py new file mode 100644 index 00000000..702de39c --- /dev/null +++ b/backend/tests/unit/conftest.py @@ -0,0 +1,30 @@ +import pytest +from vyper import Vyper + +from app.services.crucible_svc import CrucibleService +from tests.unit.fake_elastic import FakeAsyncElasticsearch + + +@pytest.fixture +def fake_config(monkeypatch): + """Provide a fake configuration""" + + vyper = Vyper(config_name="ocpperf") + vyper.set("TEST.url", "http://elastic.example.com:9200") + monkeypatch.setattr("app.config.get_config", lambda: vyper) + + +@pytest.fixture +def fake_elastic(monkeypatch, fake_config): + """Replace the actual elastic client with a fake""" + + monkeypatch.setattr( + "app.services.crucible_svc.AsyncElasticsearch", FakeAsyncElasticsearch + ) + + +@pytest.fixture +async def fake_crucible(fake_elastic): + crucible = CrucibleService("TEST") + yield crucible + await crucible.close() diff --git a/backend/tests/unit/fake_elastic.py b/backend/tests/unit/fake_elastic.py new file mode 100644 index 00000000..79d64d3c --- /dev/null +++ b/backend/tests/unit/fake_elastic.py @@ -0,0 +1,177 @@ +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, Optional, Union + +from elasticsearch import AsyncElasticsearch + + +@dataclass +class Request: + index: str + body: dict[str, Any] + doc_type: Optional[str] = None + params: Optional[Any] = None + headers: Optional[Any] = None + kwargs: Optional[dict[str, Any]] = None + + def __eq__(self, other) -> bool: + iok = self.index == other.index + bok = self.body == other.body + dok = self.doc_type == other.doc_type + pok = self.params == other.params + hok = self.headers == other.headers + + # make empty dict and None match + kok = (not self.kwargs and not other.kwargs) or self.kwargs == other.kwargs + return iok and bok and dok and pok and hok and kok + + +class FakeAsyncElasticsearch(AsyncElasticsearch): + hosts: Union[str, list[str]] + args: dict[str, Any] + closed: bool + requests: list[Request] + requests: list[Request] + + # This fake doesn't try to mimic Opensearch query and aggregation logic: + # instead, the "data" is pre-loaded with a JSON response body that will + # be returned on an "index" match. (This means that any external call we + # need to mock has a single query against any one index!) 
+    data: dict[str, Any]
+
+    def __init__(self, hosts: Union[str, list[str]], **kwargs):
+        self.hosts = hosts
+        self.args = kwargs
+        self.closed = False
+        self.data = defaultdict(list)
+        self.requests = []
+
+    # Testing helpers to manage fake searches
+    def set_query(
+        self,
+        root_index: str,
+        hit_list: Optional[list[dict[str, Any]]] = None,
+        aggregations: Optional[dict[str, Any]] = None,
+        version: int = 7,
+        repeat: int = 1,
+    ):
+        """Add a canned response to an Opensearch query
+
+        The overall response and items in the hit and aggregation lists will be
+        augmented with the usual boilerplate.
+
+        Multiple returns for a single index can be queued, in order, via
+        successive calls. To return the same result on multiple calls, specify
+        a "repeat" value greater than 1.
+
+        Args:
+            root_index: CDM index name (run, period, etc)
+            hit_list: list of hit objects to be returned
+            aggregations: map of aggregation names to values to return
+            version: CDM version
+            repeat: number of times to queue this same response (default 1)
+        """
+        ver = f"v{version:d}dev"
+        index = f"cdm{ver}-{root_index}"
+        hits = []
+        if hit_list:
+            for d in hit_list:
+                source = d
+                source["cdm"] = {"ver": ver}
+                hits.append(
+                    {
+                        "_index": index,
+                        "_id": "random_string",
+                        "_score": 1.0,
+                        "_source": source,
+                    }
+                )
+        aggregate_response = {}
+        if aggregations:
+            for agg, val in aggregations.items():
+                if isinstance(val, list):
+                    aggregate_response[agg] = {
+                        "doc_count_error_upper_bound": 0,
+                        "sum_other_doc_count": 0,
+                        "buckets": val,
+                    }
+                else:
+                    aggregate_response[agg] = val
+        response = {
+            "took": 1,
+            "timed_out": False,
+            "_shards": {"total": 1, "successful": 1, "skipped": 0, "failed": 0},
+            "hits": {
+                "total": {"value": len(hits), "relation": "eq"},
+                "max_score": 1.0,
+                "hits": hits,
+            },
+        }
+        if aggregate_response:
+            response["aggregations"] = aggregate_response
+        for _ in range(repeat):
+            self.data[index].append(response)
+
+    # Faked AsyncElasticsearch methods
+    async def close(self):
+        self.closed = True
+
+    async def info(self, **kwargs):
+        pass
+
+    async def ping(self, **kwargs):
+        return True
+
+    async def search(
+        self, body=None, index=None, doc_type=None, params=None, headers=None, **kwargs
+    ):
+        """Return a canned response to a search query.
+
+        Args:
+            body: query body
+            index: Opensearch index name
+            doc_type: document type (rarely used)
+            params: Opensearch search parameters (rarely used)
+            headers: HTTP headers (rarely used)
+            kwargs: whatever else you might pass to search
+
+        Only the index is used here; to verify the correct Opensearch query
+        bodies and parameters, the full request is recorded for inspection.
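+
+        For example (an illustrative sketch, not part of the real client
+        API), a test can later assert on the recorded traffic:
+        fake.requests[-1].index == "cdmv7dev-run".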
+ + Return: + A JSON dict with the first canned result for the index, or an error + """ + self.requests.append( + Request( + index=index, + body=body, + doc_type=doc_type, + params=params, + headers=headers, + kwargs=kwargs, + ) + ) + if index in self.data and len(self.data[index]) > 0: + target = self.data[index].pop(0) + return target + return { + "error": { + "root_cause": [ + { + "type": "index_not_found_exception", + "reason": f"no such index [{index}]", + "index": index, + "resource.id": index, + "resource.type": "index_or_alias", + "index_uuid": "_na_", + }, + ], + "type": "index_not_found_exception", + "reason": f"no such index [{index}]", + "index": index, + "resource.id": index, + "resource.type": "index_or_alias", + "index_uuid": "_na_", + }, + "status": 404, + } diff --git a/backend/tests/unit/test_crucible.py b/backend/tests/unit/test_crucible.py new file mode 100644 index 00000000..d9e0bced --- /dev/null +++ b/backend/tests/unit/test_crucible.py @@ -0,0 +1,2064 @@ +from collections import defaultdict +from datetime import datetime, timezone +import json + +from elasticsearch import AsyncElasticsearch +from fastapi import HTTPException +import pytest + +import app.config +from app.services.crucible_svc import ( + CommonParams, + CrucibleService, + Graph, + GraphList, + Parser, +) +from tests.unit.fake_elastic import Request + + +class TestParser: + + def test_parse_normal(self): + """Test successful parsing of three terms""" + + t = Parser("foo:bar=x") + assert ("foo", ":") == t._next_token([":", "="]) + assert ("bar", "=") == t._next_token([":", "="]) + assert ("x", None) == t._next_token([":", "="], optional=True) + + def test_parse_missing(self): + """Test exception when a delimiter is missing""" + + t = Parser("foo:bar=x") + assert ("foo", ":") == t._next_token([":", "="]) + assert ("bar", "=") == t._next_token([":", "="]) + with pytest.raises(HTTPException) as e: + t._next_token(delimiters=[":", "="]) + assert 400 == e.value.status_code + assert "Missing delimiter from :,= after 'x'" == e.value.detail + + def test_parse_quoted(self): + """Test acceptance of quoted terms""" + + t = Parser("'foo':\"bar\"='x'") + assert ("foo", ":") == t._next_token([":", "="]) + assert ("bar", "=") == t._next_token([":", "="]) + assert ("x", None) == t._next_token([":", "="], optional=True) + + def test_parse_bad_quoted(self): + """Test detection of badly paired quotes""" + + t = Parser("'foo':'bar\"='x'") + assert ("foo", ":") == t._next_token([":", "="]) + with pytest.raises(HTTPException) as e: + t._next_token([":", "="]) + assert 400 == e.value.status_code + assert "Unterminated quote at '\\'foo\\':\\'bar[\"]=\\'x\\''" == e.value.detail + + +class TestCommonParams: + + def test_one(self): + """Test that we drop unique params""" + + c = CommonParams() + c.add({"one": 1, "two": 2}) + c.add({"one": 1, "three": 3}) + c.add({"one": 1, "two": 5}) + assert {"one": 1} == c.render() + + +class TestList: + + @pytest.mark.parametrize( + "input,output", + ( + (None, []), + (["a"], ["a"]), + (["a", "b"], ["a", "b"]), + (["a,b"], ["a", "b"]), + (["a", "b,c", "d"], ["a", "b", "c", "d"]), + ), + ) + def test_split_empty(self, input, output): + assert output == CrucibleService._split_list(input) + + +class TestFormatters: + + @pytest.mark.parametrize( + "input", + ( + "2024-09-12 18:29:35.123000+00:00", + datetime.fromisoformat("2024-09-12 18:29:35.123000+00:00"), + "1726165775123", + 1726165775123, + ), + ) + def test_normalize_date(self, input): + assert 1726165775123 == 
CrucibleService._normalize_date(input)
+
+    def test_normalize_date_bad(self):
+        with pytest.raises(HTTPException) as e:
+            CrucibleService._normalize_date([])
+        assert 400 == e.value.status_code
+        assert "Date representation [] is not a date string or timestamp" == e.value.detail
+
+    @pytest.mark.parametrize(
+        "input,output",
+        (
+            ("abc", "1970-01-01 00:00:00+00:00"),
+            ("1726165775123", "2024-09-12 18:29:35.123000+00:00"),
+            (1726165775123, "2024-09-12 18:29:35.123000+00:00"),
+        ),
+    )
+    def test_format_timestamp(self, input, output):
+        assert output == CrucibleService._format_timestamp(input)
+
+    def test_format_data(self):
+        begin = 1726165775123
+        duration = 10244
+        raw = {
+            "begin": str(begin),
+            "end": str(begin + duration),
+            "duration": str(duration),
+            "value": "100.3",
+        }
+        expect = {
+            "begin": "2024-09-12 18:29:35.123000+00:00",
+            "end": "2024-09-12 18:29:45.367000+00:00",
+            "duration": 10.244,
+            "value": 100.3,
+        }
+        assert expect == CrucibleService._format_data(raw)
+
+    def test_format_period(self):
+        raw = {
+            "begin": "1726165775123",
+            "end": "1726165785234",
+            "id": "ABC-123",
+            "name": "measurement",
+        }
+        expect = {
+            "begin": "2024-09-12 18:29:35.123000+00:00",
+            "end": "2024-09-12 18:29:45.234000+00:00",
+            "id": "ABC-123",
+            "name": "measurement",
+        }
+        assert expect == CrucibleService._format_period(raw)
+
+
+class TestHits:
+
+    def test_no_hits(self):
+        """Expect an exception because 'hits' is missing"""
+
+        with pytest.raises(HTTPException) as e:
+            for a in CrucibleService._hits({}):
+                pytest.fail(f"Unexpected result {type(a)}")
+        assert 500 == e.value.status_code
+        assert "Attempt to iterate hits for {}" == e.value.detail
+
+    def test_empty_hits(self):
+        """Expect successful iteration of no hits"""
+
+        for a in CrucibleService._hits({"hits": {"hits": []}}):
+            pytest.fail(f"Unexpected result {a}")
+
+    def test_hits(self):
+        """Test that iteration through hits works"""
+
+        expected = [{"a": 1}, {"b": 1}]
+        payload = [{"_source": a} for a in expected]
+        assert expected == list(CrucibleService._hits({"hits": {"hits": payload}}))
+
+    def test_hits_fields(self):
+        """Test that iteration through hit fields works"""
+
+        expected = [{"a": 1}, {"b": 1}]
+        payload = [{"_source": {"f": a, "e": 1}} for a in expected]
+        assert expected == list(
+            CrucibleService._hits({"hits": {"hits": payload}}, ["f"])
+        )
+
+
+class TestAggregates:
+
+    def test_no_aggregations(self):
+        """Expect an exception if the aggregations are missing"""
+        with pytest.raises(HTTPException) as e:
+            for a in CrucibleService._aggs({}, "agg"):
+                pytest.fail(f"Unexpected result {a}")
+        assert 500 == e.value.status_code
+        assert "Attempt to iterate missing aggregations for {}" == e.value.detail
+
+    def test_missing_agg(self):
+        """Expect an exception if the requested aggregation is missing"""
+
+        payload = {"aggregations": {}}
+        with pytest.raises(HTTPException) as e:
+            for a in CrucibleService._aggs(payload, "agg"):
+                pytest.fail(f"Unexpected result {a}")
+        assert 500 == e.value.status_code
+        assert (
+            f"Attempt to iterate missing aggregation 'agg' for {payload}"
+            == e.value.detail
+        )
+
+    def test_empty_aggs(self):
+        """Expect successful iteration of no aggregation data"""
+
+        for a in CrucibleService._aggs(
+            {"aggregations": {"agg": {"buckets": []}}}, "agg"
+        ):
+            pytest.fail(f"Unexpected result {a}")
+
+    def test_aggs(self):
+        """Test that iteration through aggregations works"""
+
+        expected = [{"key": 1, "doc_count": 2}, {"key": 2, "doc_count": 5}]
+        payload = {
+            "hits": {"total": {"value": 0}, "hits": []},
+            "aggregations": {
+                "agg": {
[{"key": 1, "doc_count": 2}, {"key": 2, "doc_count": 5}] + } + }, + } + assert expected == list(CrucibleService._aggs(payload, "agg")) + + +class TestFilterBuilders: + + @pytest.mark.parametrize( + "filters,terms", + ( + ( + ["param:v=1", "tag:x='one two'", "run:email='d@e.c'"], + ( + [ + { + "dis_max": { + "queries": [ + { + "bool": { + "must": [ + { + "term": { + "param.arg": "v", + }, + }, + { + "term": { + "param.val": "1", + }, + }, + ], + }, + }, + ], + }, + }, + ], + [ + { + "dis_max": { + "queries": [ + { + "bool": { + "must": [ + { + "term": { + "tag.name": "x", + }, + }, + { + "term": { + "tag.val": "one two", + }, + }, + ], + }, + }, + ], + }, + }, + ], + [ + { + "term": { + "run.email": "d@e.c", + }, + }, + ], + ), + ), + ( + ["param:v~a"], + ( + [ + { + "dis_max": { + "queries": [ + { + "bool": { + "must": [ + { + "term": { + "param.arg": "v", + }, + }, + { + "regexp": { + "param.val": ".*a.*", + }, + }, + ], + }, + }, + ], + }, + }, + ], + None, + None, + ), + ), + ( + ["tag:v~a"], + ( + None, + [ + { + "dis_max": { + "queries": [ + { + "bool": { + "must": [ + { + "term": { + "tag.name": "v", + }, + }, + { + "regexp": { + "tag.val": ".*a.*", + }, + }, + ], + }, + }, + ], + }, + }, + ], + None, + ), + ), + ), + ) + def test_build_filter_options(self, filters, terms): + assert terms == CrucibleService._build_filter_options(filters) + + def test_build_filter_bad_key(self): + with pytest.raises(HTTPException) as e: + CrucibleService._build_filter_options(["foobar:x=y"]) + assert 400 == e.value.status_code + assert "unknown filter namespace 'foobar'" == e.value.detail + + def test_build_name_filters(self): + assert [ + {"term": {"metric_desc.names.name": "1"}} + ] == CrucibleService._build_name_filters(["name=1"]) + + def test_build_name_filters_bad(self): + with pytest.raises(HTTPException) as e: + CrucibleService._build_name_filters(["xya:x"]) + assert 400 == e.value.status_code + assert "Filter item 'xya:x' must be '='" + + @pytest.mark.parametrize("periods", ([], ["10"], ["10", "20"])) + def test_build_period_filters(self, periods): + expected = ( + [] + if not periods + else [ + { + "dis_max": { + "queries": [ + {"bool": {"must_not": {"exists": {"field": "period"}}}}, + {"terms": {"period.id": periods}}, + ] + } + } + ] + ) + assert expected == CrucibleService._build_period_filters(periods) + + @pytest.mark.parametrize( + "term,message", + ( + ( + "foo:asc", + "Sort key 'foo' must be one of begin,benchmark,desc,email,end,harness,host,id,name,source", + ), + ("email:up", "Sort direction 'up' must be one of asc,desc"), + ), + ) + def test_build_sort_filters_error(self, term, message): + with pytest.raises(HTTPException) as exc: + CrucibleService._build_sort_terms([term]) + assert 400 == exc.value.status_code + assert message == exc.value.detail + + @pytest.mark.parametrize( + "sort,terms", + ( + ([], (("run.begin", {"order": "asc"}),)), + (["email:asc"], (("run.email", {"order": "asc"}),)), + ( + ["email:desc", "name:asc"], + (("run.email", {"order": "desc"}), ("run.name", {"order": "asc"})), + ), + ), + ) + def test_build_sort_filters(self, sort, terms): + expected = [{t[0]: t[1]} for t in terms] + assert expected == CrucibleService._build_sort_terms(sort) + + @pytest.mark.parametrize( + "periods,result", + ( + ( + [ + { + "period": { + "id": "one", + "begin": "1733505934677", + "end": "1733507347857", + } + } + ], + [ + {"range": {"metric_data.begin": {"gte": "1733505934677"}}}, + {"range": {"metric_data.end": {"lte": "1733507347857"}}}, + ], + ), + (None, []), + ), + ) + 
async def test_build_timestamp_filter( + self, fake_crucible: CrucibleService, periods, result + ): + plist = None + if periods: + fake_crucible.elastic.set_query("period", periods) + plist = [p["period"]["id"] for p in periods] + assert result == await fake_crucible._build_timestamp_range_filters(plist) + + @pytest.mark.parametrize( + "period,name", + ( + ({"period": {"id": "one"}}, "run None:None,iteration None,sample None"), + ( + { + "run": {"id": "rid", "benchmark": "test", "begin": "1234"}, + "iteration": {"id": "iid", "num": 1}, + "sample": {"id": "sid", "num": 1}, + "period": {"id": "one", "begin": "5423"}, + }, + "run test:1234,iteration 1,sample 1", + ), + ), + ) + async def test_build_timestamp_filter_bad( + self, fake_crucible: CrucibleService, period, name + ): + fake_crucible.elastic.set_query("period", [period]) + with pytest.raises(HTTPException) as exc: + await fake_crucible._build_timestamp_range_filters(["one"]) + assert 422 == exc.value.status_code + assert ( + f"Unable to compute '{name}' time range: the run is missing period timestamps" + == exc.value.detail + ) + + +class TestCrucible: + + async def test_create(self, fake_crucible): + """Create and close a CrucibleService instance""" + + assert fake_crucible + assert isinstance(fake_crucible, CrucibleService) + assert isinstance(fake_crucible.elastic, AsyncElasticsearch) + assert app.config.get_config().get("TEST.url") == fake_crucible.url + elastic = fake_crucible.elastic + await fake_crucible.close() + assert fake_crucible.elastic is None + assert elastic.closed + + async def test_search_args(self, fake_crucible: CrucibleService): + await fake_crucible.search( + "run", + [{"term": "a"}], + [{"x": {"field": "a"}}], + [{"key": "asc"}], + "run", + 42, + 69, + x=2, + z=3, + ) + assert [ + Request( + "cdmv7dev-run", + { + "_source": "run", + "aggs": [ + { + "x": { + "field": "a", + }, + }, + ], + "from": 69, + "query": { + "bool": { + "filter": [ + { + "term": "a", + }, + ], + }, + }, + "size": 42, + "sort": [ + { + "key": "asc", + }, + ], + }, + None, + None, + None, + {"x": 2, "z": 3}, + ) + ] == fake_crucible.elastic.requests + + async def test_metric_ids_none(self, fake_crucible): + """A simple query for failure matching metric IDs""" + + fake_crucible.elastic.set_query("metric_desc", []) + with pytest.raises(HTTPException) as e: + await fake_crucible._get_metric_ids("runid", "source::type") + assert 400 == e.value.status_code + assert "No matches for source::type" == e.value.detail + + @pytest.mark.parametrize( + "found,expected,aggregate", + ( + ( + [ + {"metric_desc": {"id": "one-metric"}}, + ], + ["one-metric"], + False, + ), + ( + [ + {"metric_desc": {"id": "one-metric"}}, + ], + ["one-metric"], + True, + ), + ( + [ + {"metric_desc": {"id": "one-metric"}}, + {"metric_desc": {"id": "two-metric"}}, + ], + ["one-metric", "two-metric"], + True, + ), + ), + ) + async def test_metric_ids(self, fake_crucible, found, expected, aggregate): + """A simple query for matching metric IDs""" + + fake_crucible.elastic.set_query("metric_desc", found) + assert expected == await fake_crucible._get_metric_ids( + "runid", + "source::type", + aggregate=aggregate, + ) + + @pytest.mark.parametrize( + "found,message", + ( + ( + [ + {"metric_desc": {"id": "one-metric", "names": {"john": "yes"}}}, + {"metric_desc": {"id": "two-metric", "names": {"john": "no"}}}, + ], + (2, [], {"john": ["no", "yes"]}), + ), + ( + [ + { + "period": {"id": "p1"}, + "metric_desc": {"id": "three-metric", "names": {"john": "yes"}}, + }, + {"metric_desc": 
{"id": "four-metric", "names": {"fred": "why"}}}, + { + "period": {"id": "p2"}, + "metric_desc": {"id": "five-metric", "names": {"john": "sure"}}, + }, + {"metric_desc": {"id": "six-metric", "names": {"john": "maybe"}}}, + ], + (4, ["p1", "p2"], {"john": ["maybe", "sure", "yes"]}), + ), + ), + ) + async def test_metric_ids_unproc(self, fake_crucible, found, message): + """Test matching metric IDs with lax criteria""" + + fake_crucible.elastic.set_query("metric_desc", found) + with pytest.raises(HTTPException) as exc: + await fake_crucible._get_metric_ids( + "runid", + "source::type", + aggregate=False, + ) + assert 422 == exc.value.status_code + assert { + "message": f"More than one metric ({message[0]}) means you should add breakout filters or aggregate.", + "periods": message[1], + "names": message[2], + } == exc.value.detail + + async def test_run_filters(self, fake_crucible): + """Test aggregations + + This is the "simplest" aggregation-based query, but we need to define + fake aggregations for the tag, param, and run indices. + """ + + fake_crucible.elastic.set_query( + "tag", + aggregations={ + "key": [ + { + "key": "topology", + "doc_count": 25, + "values": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [], + }, + }, + { + "key": "accelerator", + "doc_count": 19, + "values": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + {"key": "A100", "doc_count": 5}, + {"key": "L40S", "doc_count": 2}, + ], + }, + }, + { + "key": "project", + "doc_count": 19, + "values": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [ + {"key": "rhelai", "doc_count": 1}, + {"key": "rhosai", "doc_count": 2}, + ], + }, + }, + ] + }, + ) + fake_crucible.elastic.set_query( + "param", + aggregations={ + "key": [ + { + "key": "bucket", + "doc_count": 25, + "values": { + "doc_count_error_upper_bound": 0, + "sum_other_doc_count": 0, + "buckets": [{"key": 200, "doc_count": 30}], + }, + }, + ] + }, + ) + fake_crucible.elastic.set_query( + "run", + aggregations={ + "begin": [{"key": 123456789, "doc_count": 1}], + "benchmark": [{"key": "ilab", "doc_count": 25}], + "desc": [], + "email": [ + {"key": "me@example.com", "doc_count": 10}, + {"key": "you@example.com", "doc_count": 15}, + ], + "end": [{"key": 1234, "doc_count": 10}], + "harness": [], + "host": [ + {"key": "one.example.com", "doc_count": 5}, + {"key": "two.example.com", "doc_count": 20}, + ], + "id": [], + "name": [], + "source": [], + }, + ) + filters = await fake_crucible.get_run_filters() + + # Array ordering is not reliable, so we need to sort + assert sorted(filters.keys()) == ["param", "run", "tag"] + assert sorted(filters["tag"].keys()) == ["accelerator", "project"] + assert sorted(filters["param"].keys()) == ["bucket"] + assert sorted(filters["run"].keys()) == ["benchmark", "email", "host"] + assert sorted(filters["tag"]["accelerator"]) == ["A100", "L40S"] + assert sorted(filters["param"]["bucket"]) == [200] + assert sorted(filters["run"]["benchmark"]) == ["ilab"] + assert sorted(filters["run"]["email"]) == ["me@example.com", "you@example.com"] + assert sorted(filters["run"]["host"]) == ["one.example.com", "two.example.com"] + + async def test_get_run_ids(self, fake_crucible: CrucibleService): + """_get_run_ids + + This is just straightline code coverage as there's no point in mocking + the filters. 
+ """ + fake_crucible.elastic.set_query( + "period", + [{"run": {"id": "one"}}, {"run": {"id": "two"}}, {"run": {"id": "three"}}], + ) + assert {"one", "two", "three"} == await fake_crucible._get_run_ids( + "period", [{"term": {"period.name": "measurement"}}] + ) + + async def test_get_runs_none(self, fake_crucible: CrucibleService): + """Test run summary""" + fake_crucible.elastic.set_query("run", []) + fake_crucible.elastic.set_query("iteration", []) + fake_crucible.elastic.set_query("tag", []) + fake_crucible.elastic.set_query("param", []) + assert { + "count": 0, + "offset": 0, + "results": [], + "sort": [], + "total": 0, + } == await fake_crucible.get_runs() + + async def test_get_runs_time_reverse(self, fake_crucible: CrucibleService): + """Test run summary""" + fake_crucible.elastic.set_query("run", []) + fake_crucible.elastic.set_query("iteration", []) + fake_crucible.elastic.set_query("tag", []) + fake_crucible.elastic.set_query("param", []) + with pytest.raises(HTTPException) as exc: + await fake_crucible.get_runs(start="2025-01-01", end="2024-01-01") + assert 422 == exc.value.status_code + assert { + "error": "Invalid date format, start_date must be less than end_date" + } == exc.value.detail + + @pytest.mark.parametrize( + "args,miss,notag,noparam", + ( + ({}, False, False, False), + ({"size": 2, "offset": 1}, False, False, False), + ({"start": "2024-01-01"}, False, False, False), + ({"end": "2024-02-01"}, False, False, False), + ({"start": "2024-01-01", "end": "2025-01-01"}, False, False, False), + ({"sort": ["end:desc"]}, False, False, False), + ( + {"filter": ["tag:a=42", "param:z=xyzzy", "run:benchmark=test"]}, + False, + False, + False, + ), + ({"filter": ["tag:a=42", "param:z=xyzzy"]}, True, False, False), + ({"filter": ["tag:a=42", "param:z=xyzzy"]}, False, True, False), + ({"filter": ["tag:a=42", "param:z=xyzzy"]}, False, False, True), + ), + ) + async def test_get_runs_queries( + self, args, miss, notag, noparam, fake_crucible: CrucibleService + ): + """Test processing of various query parameters + + Note, this isn't really testing "behavior" of the filters, which is all + in Opensearch, just the CPT service's handling of the query parameters. + + TBD: This should really verify the generated Opensearch query filters, + although that's mostly covered by earlier tests. 
+ """ + runs = [ + {"run": {"id": "r1", "begin": "0", "end": "5000", "benchmark": "test"}}, + ] + if miss: + # Add additional runs which will be rejected by filters + runs.extend( + [ + { + "run": { + "id": "r2", + "begin": "110", + "end": "7000", + "benchmark": "test", + } + }, + { + "run": { + "id": "r3", + "begin": "110", + "end": "6000", + "benchmark": "test", + } + }, + ] + ) + fake_crucible.elastic.set_query("run", runs) + fake_crucible.elastic.set_query( + "iteration", + [ + { + "run": {"id": "r1"}, + "iteration": { + "id": "i1", + "num": 1, + "primary-period": "tp", + "primary-metric": "src::tst1", + "status": "pass", + }, + }, + { + "run": {"id": "r1"}, + "iteration": { + "id": "i2", + "num": 2, + "primary-period": "tp", + "primary-metric": "src::tst2", + "status": "pass", + }, + }, + { + "run": {"id": "r1"}, + "iteration": { + "id": "i3", + "num": 3, + "primary-period": "tp", + "primary-metric": "src::tst1", + "status": "fail", + }, + }, + ], + ) + + if notag: + tags = [] + else: + tags = [ + {"run": {"id": "r1"}, "tag": {"name": "a", "val": 42}}, + {"run": {"id": "r2"}, "tag": {"name": "a", "val": 42}}, + ] + fake_crucible.elastic.set_query("tag", tags, repeat=2) + + if noparam: + params = [] + else: + params = [ + { + "run": {"id": "r1"}, + "iteration": {"id": "i1"}, + "param": {"arg": "b", "val": "cde"}, + }, + { + "run": {"id": "r1"}, + "iteration": {"id": "i1"}, + "param": {"arg": "z", "val": "xyzzy"}, + }, + { + "run": {"id": "r3"}, + "iteration": {"id": "i1"}, + "param": {"arg": "z", "val": "xyzzy"}, + }, + { + "run": {"id": "r1"}, + "iteration": {"id": "i2"}, + "param": {"arg": "b", "val": "cde"}, + }, + { + "run": {"id": "r1"}, + "iteration": {"id": "i2"}, + "param": {"arg": "x", "val": "plugh"}, + }, + ] + fake_crucible.elastic.set_query("param", params, repeat=2) + expected = { + "count": 1, + "offset": 0, + "results": [ + { + "begin": "0", + "begin_date": "1970-01-01 00:00:00+00:00", + "benchmark": "test", + "end": "5000", + "end_date": "1970-01-01 00:00:05+00:00", + "id": "r1", + "iterations": [ + { + "iteration": 1, + "params": defaultdict( + None, + { + "b": "cde", + "z": "xyzzy", + }, + ), + "primary_metric": "src::tst1", + "primary_period": "tp", + "status": "pass", + }, + { + "iteration": 2, + "params": defaultdict( + None, + { + "b": "cde", + "x": "plugh", + }, + ), + "primary_metric": "src::tst2", + "primary_period": "tp", + "status": "pass", + }, + { + "iteration": 3, + "params": {}, + "primary_metric": "src::tst1", + "primary_period": "tp", + "status": "fail", + }, + ], + "params": {}, + "primary_metrics": {"src::tst1", "src::tst2"}, + "status": "fail", + "tags": defaultdict(None, {"a": 42}), + }, + ], + "sort": [], + "total": 1, + } + if notag or noparam: + expected["results"] = [] + expected["count"] = 0 + expected["total"] = 0 + else: + if miss: + expected["total"] = 3 + if "size" in args: + expected["size"] = args["size"] + if args.get("offset"): + expected["offset"] = args["offset"] + if args.get("start"): + expected["startDate"] = ( + datetime.fromisoformat(args["start"]) + .astimezone(tz=timezone.utc) + .isoformat() + ) + if args.get("end"): + expected["endDate"] = ( + datetime.fromisoformat(args["end"]) + .astimezone(tz=timezone.utc) + .isoformat() + ) + if args.get("sort"): + expected["sort"] = args["sort"] + assert expected == await fake_crucible.get_runs(**args) + + async def test_get_tags(self, fake_crucible: CrucibleService): + """Get tags for a run ID""" + fake_crucible.elastic.set_query( + "tag", + [ + {"run": {"id": "one"}, "tag": {"name": 
"a", "val": 123}}, + {"run": {"id": "one"}, "tag": {"name": "b", "val": "hello"}}, + {"run": {"id": "one"}, "tag": {"name": "c", "val": False}}, + ], + ) + assert {"a": 123, "b": "hello", "c": False} == await fake_crucible.get_tags( + "one" + ) + + async def test_get_params_none(self, fake_crucible: CrucibleService): + """Test error when neither run nor iteration is specified""" + with pytest.raises(HTTPException) as exc: + await fake_crucible.get_params() + assert 400 == exc.value.status_code + assert ( + "A params query requires either a run or iteration ID" == exc.value.detail + ) + + async def test_get_params_run(self, fake_crucible: CrucibleService): + """Get parameters for a run""" + params = [ + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "a", "val": 10}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "b", "val": 5}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "c", "val": "val"}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid2"}, + "param": {"arg": "a", "val": 7}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid2"}, + "param": {"arg": "c", "val": "val"}, + }, + ] + fake_crucible.elastic.set_query("param", params) + assert { + "common": {"c": "val"}, + "iid1": {"a": 10, "b": 5, "c": "val"}, + "iid2": {"a": 7, "c": "val"}, + } == await fake_crucible.get_params("rid") + + async def test_get_params_iteration(self, fake_crucible: CrucibleService): + """Get parameters for an iteration""" + params = [ + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "a", "val": 10}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "b", "val": 5}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "c", "val": "val"}, + }, + ] + fake_crucible.elastic.set_query("param", params) + assert { + "iid1": {"a": 10, "b": 5, "c": "val"} + } == await fake_crucible.get_params(None, "iid1") + + async def test_get_params_iteration_dup(self, fake_crucible: CrucibleService): + """Cover an obscure log warning case""" + params = [ + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "a", "val": 10}, + }, + { + "run": {"id": "rid"}, + "iteration": {"id": "iid1"}, + "param": {"arg": "a", "val": 5}, + }, + ] + fake_crucible.elastic.set_query("param", params) + assert {"iid1": {"a": 5}} == await fake_crucible.get_params(None, "iid1") + + async def test_get_iterations(self, fake_crucible: CrucibleService): + """Get iterations for a run ID""" + iterations = [ + { + "id": "one", + "num": 1, + "path": None, + "primary_metric": "test::metric1", + "primary_period": "measurement", + "status": "pass", + }, + { + "id": "two", + "num": 2, + "path": None, + "primary_metric": "test::metric2", + "primary_period": "measurement", + "status": "pass", + }, + { + "id": "three", + "num": 3, + "path": None, + "primary_metric": "test::metric1", + "primary_period": "measurement", + "status": "pass", + }, + ] + fake_crucible.elastic.set_query( + "iteration", + [ + { + "run": {"id": "one"}, + "iteration": i, + } + for i in iterations + ], + ) + assert iterations == await fake_crucible.get_iterations("one") + + async def test_get_samples_none(self, fake_crucible: CrucibleService): + """Test error when neither run nor iteration is specified""" + with pytest.raises(HTTPException) as exc: + await fake_crucible.get_samples() + assert 400 == exc.value.status_code + assert ( + "A sample query requires either a run 
or iteration ID" == exc.value.detail + ) + + @pytest.mark.parametrize("ids", (("one", None), (None, 1))) + async def test_get_samples(self, fake_crucible: CrucibleService, ids): + """Get samples for a run ID""" + samples = [ + { + "num": "1", + "path": None, + "id": "one", + "status": "pass", + "primary_metric": "pm", + "primary_period": "m", + "iteration": 1, + }, + { + "id": "two", + "num": "2", + "path": None, + "status": "pass", + "primary_metric": "pm", + "primary_period": "m", + "iteration": 1, + }, + { + "id": "three", + "num": "3", + "path": None, + "status": "pass", + "primary_metric": "pm", + "primary_period": "m", + "iteration": 1, + }, + ] + fake_crucible.elastic.set_query( + "sample", + [ + { + "run": {"id": "one"}, + "iteration": { + "primary-metric": "pm", + "primary-period": "m", + "num": 1, + }, + "sample": s, + } + for s in samples + ], + ) + assert samples == await fake_crucible.get_samples(*ids) + + async def test_get_periods_none(self, fake_crucible: CrucibleService): + """Test error when neither run, iteration, nor sample is specified""" + with pytest.raises(HTTPException) as exc: + await fake_crucible.get_periods() + assert 400 == exc.value.status_code + assert ( + "A period query requires a run, iteration, or sample ID" == exc.value.detail + ) + + @pytest.mark.parametrize( + "ids", (("one", None, None), (None, 1, None), (None, None, 1)) + ) + async def test_get_periods(self, fake_crucible: CrucibleService, ids): + """Get samples for a run ID""" + periods = [ + { + "begin": "2024-12-05 21:16:31.046000+00:00", + "end": "2024-12-05 21:40:31.166000+00:00", + "id": "306C8A78-B352-11EF-8E37-AD212D0A0B9F", + "name": "measurement", + "iteration": 1, + "sample": 1, + "primary_metric": "ilab::sdg-samples-sec", + "status": "pass", + } + ] + fake_crucible.elastic.set_query( + "period", + [ + { + "run": {"id": "one"}, + "iteration": { + "primary-metric": p["primary_metric"], + "primary-period": "measurement", + "num": 1, + "status": p["status"], + }, + "sample": {"num": 1, "status": p["status"], "path": None}, + "period": { + "id": p["id"], + "name": p["name"], + "begin": str( + int(datetime.fromisoformat(p["begin"]).timestamp() * 1000) + ), + "end": str( + int(datetime.fromisoformat(p["end"]).timestamp() * 1000) + ), + "primary-metric": p["primary_metric"], + "status": p["status"], + }, + } + for p in periods + ], + ) + assert periods == await fake_crucible.get_periods(*ids) + + async def test_get_metrics_list(self, fake_crucible: CrucibleService): + """Get samples for a run ID""" + metrics = { + "source1::type1": { + "periods": [], + "breakouts": {"name1": ["value1", "value2"]}, + }, + "source1::type2": {"periods": ["p1", "p2"], "breakouts": {}}, + } + query = [ + { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "names": {"name1": "value1"}, + }, + }, + { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "names": {"name1": "value1"}, + }, + }, + { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "names": {"name1": "value2"}, + }, + }, + { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "names": {"name1": "value2"}, + }, + }, + { + "run": {"id": "one"}, + "period": {"id": "p1"}, + "metric_desc": {"source": "source1", "type": "type2", "names": {}}, + }, + { + "run": {"id": "one"}, + "period": {"id": "p2"}, + "metric_desc": {"source": "source1", "type": "type2", "names": {}}, + }, + ] + 
fake_crucible.elastic.set_query("metric_desc", query) + result = await fake_crucible.get_metrics_list("one") + + # NOTE: the method returns a defaultdict, which doesn't compare to a + # dict but "in the real world" serializes the same: so we just + # serialize and deserialize to mimic the actual API behavior. + result = json.loads(json.dumps(result)) + assert metrics == result + + async def test_get_metric_breakout_none(self, fake_crucible: CrucibleService): + """Test error when the metric isn't found""" + fake_crucible.elastic.set_query("metric_desc", []) + with pytest.raises(HTTPException) as exc: + await fake_crucible.get_metric_breakouts( + "one", metric="source1::type1", names=[], periods=[] + ) + assert 400 == exc.value.status_code + assert "Metric name source1::type1 not found for run one" == exc.value.detail + + @pytest.mark.parametrize("period", (True, False)) + async def test_get_metric_breakout(self, period, fake_crucible: CrucibleService): + """Get samples for a run ID""" + metrics = { + "label": "source1::type1", + "class": ["classless", "classy"], + "type": "type1", + "source": "source1", + "breakouts": {"name1": ["value1", "value2"]}, + } + md1 = { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "class": "classy", + "names": {"name1": "value1"}, + }, + } + md2 = { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "names": {"name1": "value2"}, + }, + } + if period: + metrics["periods"] = ["p1", "p2"] + md1["period"] = {"id": "p1"} + md2["period"] = {"id": "p2"} + query = [ + md1, + md2, + { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "class": "classless", + "names": {"name1": "value1"}, + }, + }, + { + "run": {"id": "one"}, + "metric_desc": { + "source": "source1", + "type": "type1", + "names": {"name1": "value2"}, + }, + }, + ] + fake_crucible.elastic.set_query("metric_desc", query) + result = await fake_crucible.get_metric_breakouts( + "one", metric="source1::type1", names=[], periods=[] + ) + + # NOTE: the method returns a defaultdict, which doesn't compare to a + # dict but "in the real world" serializes the same: so we just + # serialize and deserialize to mimic the actual API behavior. 
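+        # (The json round trip turns the defaultdict, and any nested
+        # defaultdicts, into the plain dicts the HTTP API would emit.)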
+ result = json.loads(json.dumps(result)) + assert metrics == result + + async def test_metrics_data_one_noagg(self, fake_crucible: CrucibleService): + """Return data samples for a single metric""" + + fake_crucible.elastic.set_query( + "metric_desc", + [{"metric_desc": {"id": "one-metric", "names": {}}}], + ) + fake_crucible.elastic.set_query( + "metric_data", + [ + { + "metric_desc": {"id": "one-metric"}, + "metric_data": { + "begin": "1726165775123", + "end": "1726165789213", + "duration": 14100, + "value": 9.35271216694379, + }, + }, + { + "metric_desc": {"id": "one-metric"}, + "metric_data": { + "begin": "1726165790000", + "end": "1726165804022", + "duration": 14022, + "value": 9.405932330557683, + }, + }, + ], + ) + expected = [ + { + "begin": "2024-09-12 18:29:35.123000+00:00", + "duration": 14.1, + "end": "2024-09-12 18:29:49.213000+00:00", + "value": 9.35271216694379, + }, + { + "begin": "2024-09-12 18:29:50+00:00", + "duration": 14.022, + "end": "2024-09-12 18:30:04.022000+00:00", + "value": 9.405932330557683, + }, + ] + assert expected == await fake_crucible.get_metrics_data("runid", "source::type") + assert fake_crucible.elastic.requests == [ + Request( + "cdmv7dev-metric_desc", + { + "query": { + "bool": { + "filter": [ + { + "term": { + "run.id": "runid", + }, + }, + { + "term": { + "metric_desc.source": "source", + }, + }, + { + "term": { + "metric_desc.type": "type", + }, + }, + ], + }, + }, + "size": 262144, + }, + kwargs={"ignore_unavailable": True}, + ), + Request( + "cdmv7dev-metric_data", + { + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": [ + "one-metric", + ], + }, + }, + ], + }, + }, + "size": 262144, + }, + ), + ] + + @pytest.mark.parametrize("count", (0, 2)) + async def test_metrics_data_agg(self, count, fake_crucible): + """Return data samples for aggregated metrics""" + + fake_crucible.elastic.set_query( + "metric_desc", + [ + {"metric_desc": {"id": "one-metric", "names": {}}}, + {"metric_desc": {"id": "two-metric", "names": {}}}, + ], + ) + fake_crucible.elastic.set_query( + "metric_data", + aggregations={ + "duration": { + "count": count, + "min": 14022, + "max": 14100, + "avg": 14061, + "sum": 28122, + } + }, + ) + if count: + fake_crucible.elastic.set_query( + "metric_data", + aggregations={ + "interval": [ + {"key": 1726165789213, "value": {"value": 9.35271216694379}}, + {"key": 1726165804022, "value": {"value": 9.405932330557683}}, + ] + }, + ) + expected = [ + { + "begin": "2024-09-12 18:29:35.191000+00:00", + "duration": 14.022, + "end": "2024-09-12 18:29:49.213000+00:00", + "value": 9.35271216694379, + }, + { + "begin": "2024-09-12 18:29:50+00:00", + "duration": 14.022, + "end": "2024-09-12 18:30:04.022000+00:00", + "value": 9.405932330557683, + }, + ] + else: + expected = [] + assert expected == await fake_crucible.get_metrics_data( + "r1", "source::type", aggregate=True + ) + expected_requests = [ + Request( + "cdmv7dev-metric_desc", + { + "query": { + "bool": { + "filter": [ + { + "term": { + "run.id": "r1", + }, + }, + { + "term": { + "metric_desc.source": "source", + }, + }, + { + "term": { + "metric_desc.type": "type", + }, + }, + ], + }, + }, + "size": 262144, + }, + kwargs={"ignore_unavailable": True}, + ), + Request( + "cdmv7dev-metric_data", + { + "aggs": { + "duration": { + "stats": { + "field": "metric_data.duration", + }, + }, + }, + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": [ + "one-metric", + "two-metric", + ], + }, + }, + ], + }, + }, + "size": 0, + }, + ), + ] + if count: + 
expected_requests.append( + Request( + "cdmv7dev-metric_data", + { + "aggs": { + "interval": { + "aggs": { + "value": { + "sum": { + "field": "metric_data.value", + }, + }, + }, + "histogram": { + "field": "metric_data.end", + "interval": 14022, + }, + }, + }, + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": [ + "one-metric", + "two-metric", + ], + }, + }, + ], + }, + }, + "size": 0, + }, + ), + ) + assert fake_crucible.elastic.requests == expected_requests + + async def test_metrics_summary(self, fake_crucible: CrucibleService): + """Return data summary for a metrics""" + + fake_crucible.elastic.set_query( + "metric_desc", + [ + {"metric_desc": {"id": "one-metric", "names": {"a": "1"}}}, + ], + ) + expected = { + "count": 5, + "min": 9.35271216694379, + "max": 9.405932330557683, + "avg": 9.379322249, + "sum": 18.758644498, + } + fake_crucible.elastic.set_query("metric_data", aggregations={"score": expected}) + assert expected == await fake_crucible.get_metrics_summary( + "runid", "one-metric::type", ["a=1"] + ) + assert fake_crucible.elastic.requests == [ + Request( + "cdmv7dev-metric_desc", + { + "query": { + "bool": { + "filter": [ + { + "term": { + "run.id": "runid", + }, + }, + { + "term": { + "metric_desc.source": "one-metric", + }, + }, + { + "term": { + "metric_desc.type": "type", + }, + }, + {"term": {"metric_desc.names.a": "1"}}, + ], + }, + }, + "size": 262144, + }, + kwargs={"ignore_unavailable": True}, + ), + Request( + "cdmv7dev-metric_data", + { + "aggs": {"score": {"stats": {"field": "metric_data.value"}}}, + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": [ + "one-metric", + ], + }, + }, + ], + }, + }, + "size": 0, + }, + ), + ] + + @pytest.mark.parametrize( + "runs,param_idx,periods,period_idx,title", + ( + ([], 0, [], 0, "source::type"), + (["r2", "r1"], 0, [], 0, "source::type {run 2}"), + ([], 0, ["p1"], 0, "source::type (n=42)"), + ([], 1, ["p1"], 1, "source::type"), + ([], 1, ["p1"], 2, "source::type"), + ), + ) + async def test_graph_title_no_query( + self, + runs, + param_idx, + periods, + period_idx, + title, + fake_crucible: CrucibleService, + ): + """Test generation of default metric titles""" + + param_runs = [ + {"r1": {"i1": {"n": "42"}, "i2": {"n": "31"}}}, + {"r1": {"i1": {"n": "42"}, "i2": {"n": "42"}}}, + ][param_idx] + period_runs = [ + {"r1": {"i1": {"p1"}, "i2": {"p2"}}}, + {"r1": {"i1": {"p1"}}}, + {"r1": {"i1": {"p2"}}}, + ][period_idx] + name = await fake_crucible._graph_title( + "r1", + runs, + Graph(metric="source::type", periods=periods), + param_runs, + period_runs, + ) + assert name == title + + async def test_graph_title_query(self, fake_crucible: CrucibleService): + """Test generation of default metric titles""" + + param_runs = {} + period_runs = {} + fake_crucible.elastic.set_query( + "param", + [ + { + "run": {"id": "r1"}, + "iteration": {"id": "i1"}, + "param": {"arg": "a", "val": "1"}, + }, + ], + ) + fake_crucible.elastic.set_query( + "period", + [ + { + "run": {"id": "r1"}, + "iteration": {"id": "i1"}, + "period": {"id": "p1"}, + }, + ], + ) + name = await fake_crucible._graph_title( + "r1", + [], + Graph(metric="source::type"), + param_runs, + period_runs, + ) + assert name == "source::type" + assert fake_crucible.elastic.requests == [ + Request( + "cdmv7dev-param", + { + "query": { + "bool": { + "filter": [ + { + "term": { + "run.id": "r1", + }, + }, + ], + }, + }, + "size": 262144, + }, + ), + Request( + "cdmv7dev-period", + { + "query": { + "bool": { + "filter": [ + { + "term": { 
+ "run.id": "r1", + }, + }, + ], + }, + }, + "size": 262144, + }, + ), + ] + + async def test_metrics_graph_norun(self, fake_crucible: CrucibleService): + with pytest.raises(HTTPException) as exc: + await fake_crucible.get_metrics_graph( + GraphList( + name="graph", + graphs=[Graph(metric="source::type", aggregate=True, title="test")], + ) + ) + assert exc.value.status_code == 400 + assert exc.value.detail == "each graph request must have a run ID" + + @pytest.mark.parametrize("count", (0, 2)) + async def test_metrics_graph(self, count, fake_crucible: CrucibleService): + """Return graph for aggregated metrics""" + + metrics = [{"metric_desc": {"id": "one-metric", "names": {}}}] + if count: + metrics.append({"metric_desc": {"id": "two-metric", "names": {}}}) + fake_crucible.elastic.set_query( + "metric_data", + aggregations={ + "duration": { + "count": count, + "min": 14022, + "max": 14100, + "avg": 14061, + "sum": 28122, + } + }, + ) + fake_crucible.elastic.set_query( + "metric_data", + aggregations={ + "interval": [ + {"key": 1726165789213, "value": {"value": 9.35271216694379}}, + {"key": 1726165804022, "value": {"value": 9.405932330557683}}, + ] + }, + ) + expected = { + "data": [ + { + "labels": { + "x": "sample timestamp", + "y": "samples / second", + }, + "marker": { + "color": "black", + }, + "mode": "line", + "name": "test", + "type": "scatter", + "x": [ + "2024-09-12 18:29:49.213000+00:00", + "2024-09-12 18:30:03.234000+00:00", + "2024-09-12 18:30:04.022000+00:00", + "2024-09-12 18:30:18.043000+00:00", + ], + "y": [ + 9.35271216694379, + 9.35271216694379, + 9.405932330557683, + 9.405932330557683, + ], + "yaxis": "y", + }, + ], + "layout": { + "width": "1500", + "yaxis": { + "color": "black", + "title": "source::type", + }, + }, + } + else: + expected = { + "data": [ + { + "labels": { + "x": "sample timestamp", + "y": "samples / second", + }, + "marker": { + "color": "black", + }, + "mode": "line", + "name": "test", + "type": "scatter", + "x": [ + "2024-09-12 18:29:49.213000+00:00", + "2024-09-12 18:29:50.213000+00:00", + "2024-09-12 18:30:04.022000+00:00", + "2024-09-12 18:30:05.022000+00:00", + ], + "y": [ + 9.35271216694379, + 9.35271216694379, + 9.405932330557683, + 9.405932330557683, + ], + "yaxis": "y", + }, + ], + "layout": { + "width": "1500", + "yaxis": { + "color": "black", + "title": "source::type", + }, + }, + } + fake_crucible.elastic.set_query( + "metric_data", + [ + { + "metric_data": { + "begin": "1726165789213", + "end": "1726165790213", + "value": 9.35271216694379, + } + }, + { + "metric_data": { + "begin": "1726165804022", + "end": "1726165805022", + "value": 9.405932330557683, + } + }, + ], + ) + fake_crucible.elastic.set_query("metric_desc", metrics) + + assert expected == await fake_crucible.get_metrics_graph( + GraphList( + run="r1", + name="graph", + graphs=[Graph(metric="source::type", aggregate=True, title="test")], + ) + ) + expected_requests = [ + Request( + "cdmv7dev-metric_desc", + { + "query": { + "bool": { + "filter": [ + { + "term": { + "run.id": "r1", + }, + }, + { + "term": { + "metric_desc.source": "source", + }, + }, + { + "term": { + "metric_desc.type": "type", + }, + }, + ], + }, + }, + "size": 262144, + }, + kwargs={"ignore_unavailable": True}, + ), + ] + if count: + expected_requests.extend( + [ + Request( + "cdmv7dev-metric_data", + { + "aggs": { + "duration": { + "stats": { + "field": "metric_data.duration", + }, + }, + }, + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": [ + "one-metric", + "two-metric", + ], + }, 
+ }, + ], + }, + }, + "size": 0, + }, + ), + Request( + "cdmv7dev-metric_data", + { + "aggs": { + "interval": { + "aggs": { + "value": { + "sum": { + "field": "metric_data.value", + }, + }, + }, + "histogram": { + "field": "metric_data.begin", + "interval": 14022, + }, + }, + }, + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": [ + "one-metric", + "two-metric", + ], + }, + }, + ], + }, + }, + "size": 0, + }, + ), + ] + ) + else: + expected_requests.append( + Request( + "cdmv7dev-metric_data", + { + "query": { + "bool": { + "filter": [ + { + "terms": { + "metric_desc.id": ["one-metric"], + }, + }, + ], + }, + }, + "size": 262144, + }, + ), + ) + assert fake_crucible.elastic.requests == expected_requests diff --git a/backend/tests/unit/test_ilab_api.py b/backend/tests/unit/test_ilab_api.py new file mode 100644 index 00000000..cf7e0226 --- /dev/null +++ b/backend/tests/unit/test_ilab_api.py @@ -0,0 +1,311 @@ +from datetime import datetime + +from fastapi import HTTPException +import pytest +from starlette.testclient import TestClient + +from app.main import app as fastapi_app +from app.services.crucible_svc import CrucibleService, Graph, GraphList + + +@pytest.fixture +def client(): + """Create a Starlette test client.""" + yield TestClient(fastapi_app) + + +class TestInit: + + @pytest.mark.parametrize( + "exc", (None, HTTPException(status_code=501, detail="test")) + ) + def test_crucible(self, exc, monkeypatch, client: TestClient): + + class FakeCrucible(CrucibleService): + def __init__(self, config): + if exc: + raise exc + self.url = "For me to know" + + async def get_run_filters(self) -> dict[str, list[str]]: + return {} + + async def close(self): + pass + + monkeypatch.setattr( + "app.api.v1.endpoints.ilab.ilab.CrucibleService", FakeCrucible + ) + response = client.get("/api/v1/ilab/runs/filters") + if exc: + assert response.json() == {"detail": "test"} + assert response.status_code == 501 + else: + assert response.json() == {} + assert response.status_code == 200 + + +class TestIlabApi: + + def test_filters(self, monkeypatch, client: TestClient, fake_crucible): + async def fake_get(self): + return {"param": {}, "tag": {}, "run": {}} + + monkeypatch.setattr( + "app.services.crucible_svc.CrucibleService.get_run_filters", fake_get + ) + response = client.get("/api/v1/ilab/runs/filters") + assert response.json() == {"param": {}, "tag": {}, "run": {}} + assert response.status_code == 200 + + @pytest.mark.parametrize( + "stime,etime,expected_start,expected_end", + ( + (None, None, "2023-12-16T10:05:00.000100", "2024-01-15T10:05:00.000100"), + ("2024-01-01", None, "2024-01-01", None), + (None, "2024-02-01", None, "2024-02-01"), + ("2024-01-01", "2024-02-01", "2024-01-01", "2024-02-01"), + ), + ) + def test_runs( + self, + stime, + etime, + expected_start, + expected_end, + monkeypatch, + client: TestClient, + fake_crucible, + ): + expected = { + "results": [], + "count": 0, + "total": 0, + "startDate": expected_start, + "endDate": expected_end, + } + not_now = datetime(2024, 1, 15, 10, 5, 0, 100) + + class mydatetime(datetime): + @classmethod + def now(cls, tz=None): + return not_now + + async def fake_get(self, start, end, filter, sort, size, offset): + return { + "results": [], + "count": 0, + "total": 0, + "startDate": ( + start.isoformat() if isinstance(start, datetime) else start + ), + "endDate": end.isoformat() if isinstance(end, datetime) else end, + } + + monkeypatch.setattr("app.api.v1.endpoints.ilab.ilab.datetime", mydatetime) + monkeypatch.setattr( + 
"app.services.crucible_svc.CrucibleService.get_runs", fake_get + ) + queries = {} + if stime: + queries["start_date"] = stime + if etime: + queries["end_date"] = etime + response = client.get("/api/v1/ilab/runs", params=queries) + assert response.json() == expected + assert response.status_code == 200 + + @pytest.mark.parametrize( + "api,name,detail", + ( + ("tags", "get_tags", {"tag": "value"}), + ("params", "get_params", {"i1": {"key": 1}}), + ("iterations", "get_iterations", [{"i1": {"num": 1}}]), + ("samples", "get_samples", [{"num": "1"}]), + ("periods", "get_periods", [{"name": "period"}]), + ( + "metrics", + "get_metrics_list", + {"source::type": {"periods": [], "breakouts": {}}}, + ), + ), + ) + def test_run_detail( + self, api, name, detail, monkeypatch, client: TestClient, fake_crucible + ): + async def fake_get(self, run): + assert run == "r1" + return detail + + monkeypatch.setattr( + f"app.services.crucible_svc.CrucibleService.{name}", fake_get + ) + response = client.get(f"/api/v1/ilab/runs/r1/{api}") + assert response.json() == detail + assert response.status_code == 200 + + def test_iteration_samples(self, monkeypatch, client: TestClient, fake_crucible): + async def fake_get(self, run=None, iteration=None): + assert run is None + assert iteration == "i1" + return [{"num": "2"}] + + monkeypatch.setattr( + "app.services.crucible_svc.CrucibleService.get_samples", fake_get + ) + response = client.get("/api/v1/ilab/iterations/i1/samples") + assert response.json() == [{"num": "2"}] + assert response.status_code == 200 + + @pytest.mark.parametrize( + "name,period", ((None, None), (["cpu=1", "x=y"], None), (None, ["p1,p2"])) + ) + @pytest.mark.parametrize( + "api,getter", + (("breakouts", "get_metric_breakouts"), ("summary", "get_metrics_summary")), + ) + def test_metric_name_period( + self, name, period, api, getter, monkeypatch, client: TestClient, fake_crucible + ): + if api == "breakouts": + expected = { + "label": "source::type", + "class": ["test"], + "type": "type", + "source": "source", + "breakouts": {"one": [1, 2]}, + } + else: + expected = {"count": 2, "min": 0.0, "max": 10.0, "avg": 5.0, "sum": 10.0} + + async def fake_get(self, run, metric, names, periods): + assert run == "r1" + assert metric == "source::type" + assert names == name + assert periods == period + return expected + + monkeypatch.setattr( + f"app.services.crucible_svc.CrucibleService.{getter}", fake_get + ) + query = None + if name or period: + query = {} + if name: + query["name"] = name + if period: + query["period"] = period + response = client.get(f"/api/v1/ilab/runs/r1/{api}/source::type", params=query) + assert response.json() == expected + assert response.status_code == 200 + + @pytest.mark.parametrize( + "name,period,agg", + ( + (None, None, False), + (["cpu=1", "x=y"], None, False), + (None, ["p1,p2"], False), + (["cpu=1", "x=y"], None, True), + (None, ["p1,p2"], True), + ), + ) + def test_metric_data( + self, name, period, agg, monkeypatch, client: TestClient, fake_crucible + ): + expected = [{"begin": "t1", "end": "t2", "duration": 0.0, "value": 0.0}] + + async def fake_get(self, run, metric, names, periods, aggregate): + assert run == "r1" + assert metric == "source::type" + assert names == name + assert periods == period + assert aggregate == agg + return expected + + monkeypatch.setattr( + "app.services.crucible_svc.CrucibleService.get_metrics_data", fake_get + ) + query = None + if name or period or agg: + query = {} + if name: + query["name"] = name + if period: + query["period"] = period 
+ if agg: + query["aggregate"] = agg + response = client.get("/api/v1/ilab/runs/r1/data/source::type", params=query) + assert response.json() == expected + assert response.status_code == 200 + + def test_multigraph(self, monkeypatch, client: TestClient, fake_crucible): + expected = [{"data": [{"x": [], "y": []}]}] + + async def fake_get(self, graphs): + assert graphs == GraphList( + run="r1", name="graphs", graphs=[Graph(metric="source::type")] + ) + return expected + + monkeypatch.setattr( + "app.services.crucible_svc.CrucibleService.get_metrics_graph", fake_get + ) + response = client.post( + "/api/v1/ilab/runs/multigraph", + json={ + "run": "r1", + "name": "graphs", + "graphs": [{"metric": "source::type"}], + }, + ) + assert response.json() == expected + assert response.status_code == 200 + + @pytest.mark.parametrize( + "name,period,agg,title", + ( + (None, None, False, None), + (["cpu=1", "x=y"], None, False, "title"), + (None, ["p1,p2"], False, None), + (["cpu=1", "x=y"], None, True, "t2"), + (None, ["p1,p2"], True, None), + ), + ) + def test_metric_graph( + self, name, period, agg, title, monkeypatch, client: TestClient, fake_crucible + ): + expected = [{"data": [{"x": [], "y": []}]}] + + async def fake_get(self, graphs): + assert graphs == GraphList( + run="r1", + name="source::type", + graphs=[ + Graph( + metric="source::type", + aggregate=agg, + names=name, + periods=period, + title=title, + ) + ], + ) + return expected + + monkeypatch.setattr( + "app.services.crucible_svc.CrucibleService.get_metrics_graph", fake_get + ) + query = None + if name or period or agg or title: + query = {} + if name: + query["name"] = name + if period: + query["period"] = period + if agg: + query["aggregate"] = agg + if title: + query["title"] = title + response = client.get("/api/v1/ilab/runs/r1/graph/source::type", params=query) + assert response.json() == expected + assert response.status_code == 200 diff --git a/backend/tests/utilities/anointed.txt b/backend/tests/utilities/anointed.txt new file mode 100644 index 00000000..3454134f --- /dev/null +++ b/backend/tests/utilities/anointed.txt @@ -0,0 +1,5 @@ +26ad48c1-fc9c-404d-bccf-d19755ca8a39 +20a15c35-22ac-482f-b209-4b8ecc018a26 +0d78ece9-817c-41aa-be19-e90d53924206 +1878b7d2-9195-4104-8bd0-13d31a8f5524 +1c756d89-3aef-445b-b71b-99b8916f8537 diff --git a/backend/tests/utilities/clone_ilab.py b/backend/tests/utilities/clone_ilab.py new file mode 100755 index 00000000..6361b76c --- /dev/null +++ b/backend/tests/utilities/clone_ilab.py @@ -0,0 +1,185 @@ +"""Clone a live Crucible CDM v7 Opensearch instance + +This is used to bootstrap a functional test environment by starting with a live +CDM database. + +E.g.: + python3 tests/utilities/clone_ilab.py \ + http://n42-h01-b01-mx750c.rdu3.labs.perfscale.redhat.com:9200 \ + http://localhost:9200 \ + --ids tests/utilities/anointed.txt + +TODO: Update to handle CDM v8 +""" + +import argparse +from pathlib import Path +import sys +import time +from typing import Any, Iterator, Optional +from elasticsearch import Elasticsearch + +indices = ( + "run", + "iteration", + "sample", + "period", + "tag", + "param", + "metric_desc", +) + + +def index(root: str) -> str: + return f"cdmv7dev-{root}" + + +def hits( + payload: dict[str, Any], fields: Optional[list[str]] = None +) -> Iterator[dict[str, Any]]: + """Helper to iterate through OpenSearch query matches + + Iteratively yields the "_source" of each hit. As a convenience, can + yield a sub-object of "_source" ... 
for example, specifying the
+    optional "fields" as ["metric_desc", "id"] will yield the equivalent of
+    hit["_source"]["metric_desc"]["id"]
+
+    Args:
+        payload: OpenSearch response payload
+        fields: Optional sub-fields of "_source"
+
+    Returns:
+        Yields each object from the "greatest hits" list
+    """
+    if "hits" not in payload:
+        raise Exception(f"Attempt to iterate hits for {payload}")
+    hits = payload.get("hits", {}).get("hits", [])
+    for h in hits:
+        source = h["_source"]
+        if fields:
+            for f in fields:
+                source = source[f]
+        yield source
+
+
+def clone(source_server: str, target_server: str, ids: list[str]):
+    target = Elasticsearch(target_server)
+    source = Elasticsearch(source_server)
+
+    if ids:
+        query = {"query": {"bool": {"filter": {"terms": {"run.id": ids}}}}}
+    else:
+        query = None
+    for i in indices:
+        start = time.time()
+        name = index(i)
+        print(f"Migrating {name}")
+        r = target.indices.delete(name, ignore_unavailable=True)
+        if not r["acknowledged"]:
+            print(f"delete {name} failed: {r}", file=sys.stderr)
+            continue
+        if target.indices.exists_template(name):
+            r = target.indices.delete_template(name)
+            if not r.get("acknowledged"):
+                print(f"Problem deleting template {name}: {r}", file=sys.stderr)
+                continue
+        response = source.indices.get_template(name=name)
+        template = response.get(name)
+        if not template:
+            print(f"Problem getting template {name}: {response}")
+            continue
+        r = target.indices.put_template(name, body=template)
+        if not r.get("acknowledged"):
+            print(f"Problem installing template {name}: {r}", file=sys.stderr)
+            continue
+        body = {
+            "source": {"remote": {"host": source_server}, "index": name},
+            "dest": {"index": name},
+            "size": 250000
+        }
+        if query:
+            body["source"].update(query)
+        r = target.reindex(
+            body=body,
+            timeout="5m",
+            request_timeout=1000.0,
+        )
+        if r.get("failures"):
+            print(f"Problem reindexing (cloning) {name}: {r}", file=sys.stderr)
+            continue
+        print(
+            f"Done migrating {name} ({r.get('total')} documents): {time.time()-start:.3f} seconds"
+        )
+
+    # We have to handle metric_data differently, because those documents don't
+    # have run subdocuments. Instead, we need to collect all of the metric_desc
+    # IDs for the runs we've transferred (a query against the target), and then
+    # use that list to transfer all associated metric_data documents.
+    #
+    # We'll just get all metric_desc documents -- if we're cloning a set, the
+    # target has only the selected runs.
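+    #
+    # Illustration (hypothetical IDs): if only runs "r1" and "r2" were cloned
+    # above, the target's metric_desc index holds just their descriptors, so
+    # the "terms" filter below reindexes exactly the metric_data documents
+    # belonging to those runs.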
+    idx = index("metric_desc")
+    name = index("metric_data")
+    r = target.search(size=250000, index=idx)
+    metrics = [h["metric_desc"]["id"] for h in hits(r)]
+    if len(metrics) == 0:
+        print(f"funky: {r}")
+    print(f"Migrating {name} for {len(metrics)} metrics")
+    start = time.time()
+    r = target.reindex(
+        body={
+            "source": {
+                "query": {"bool": {"filter": {"terms": {"metric_desc.id": metrics}}}},
+                "remote": {"host": source_server},
+                "index": name,
+            },
+            "dest": {"index": name},
+            "size": 250000
+        },
+        timeout="5m",
+        request_timeout=1000.0,
+    )
+    if r.get("failures"):
+        print(f"Problem reindexing (cloning) {name}: {r}", file=sys.stderr)
+    print(
+        f"Done migrating {name} ({r.get('total')} documents): {time.time()-start:.3f} seconds"
+    )
+
+
+parser = argparse.ArgumentParser("clone")
+parser.add_argument("source", help="Crucible Opensearch server address")
+parser.add_argument("target", help="Functional test Opensearch server address")
+parser.add_argument(
+    "-i",
+    "--ids",
+    dest="ids",
+    help="Read a list of run IDs from the named file",
+)
+parser.add_argument(
+    "-v",
+    "--verbose",
+    dest="verbose",
+    action="count",
+    default=0,
+    help="Give progress feedback",
+)
+parser.add_argument("--version", action="version", version="%(prog)s 0.1")
+
+args = parser.parse_args()
+print(f"SOURCE: {args.source}")
+print(f"TARGET: {args.target}")
+
+try:
+    ids = []
+    if args.ids:
+        try:
+            ids = Path(args.ids).read_text().splitlines()
+        except FileNotFoundError:
+            print(f"File {args.ids} not found", file=sys.stderr)
+            sys.exit(1)
+        print(f"Anointed runs: {ids}")
+    clone(args.source, args.target, ids)
+    sys.exit(0)
+except Exception as exc:
+    print(f"Something smells odd: {str(exc)!r}")
+    sys.exit(1)
diff --git a/backend/tests/utilities/diagnose.py b/backend/tests/utilities/diagnose.py
new file mode 100755
index 00000000..b0e30683
--- /dev/null
+++ b/backend/tests/utilities/diagnose.py
@@ -0,0 +1,349 @@
+from collections import defaultdict
+from dataclasses import dataclass, field
+import datetime
+import sys
+from threading import Thread
+import time
+from typing import Any, Iterator, Optional, Union
+from elasticsearch import Elasticsearch
+import argparse
+
+
+@dataclass
+class Info:
+    id: str
+    good: bool = True
+    errors: dict[str, int] = field(default_factory=lambda: defaultdict(int))
+    begin: int = 0
+    end: int = 0
+    benchmark: str = ""
+    tags: dict[str, str] = field(default_factory=lambda: defaultdict(str))
+    iterations: int = 0
+    params: dict[int, dict[str, str]] = field(
+        default_factory=lambda: defaultdict(lambda: defaultdict(str))
+    )
+    samples: int = 0
+    periods: int = 0
+    primary: set[str] = field(default_factory=set)
+    metrics: dict[str, int] = field(default_factory=lambda: defaultdict(int))
+    points: int = 0
+
+
+class Verify:
+    """Encapsulate -v status messages."""
+
+    def __init__(self, verify: Union[bool, int]):
+        """Initialize the object.
+
+        Args:
+            verify: True to write status messages.
+        """
+        if isinstance(verify, int):
+            self.verify = verify
+        else:
+            self.verify = 1 if verify else 0
+
+    def __bool__(self) -> bool:
+        """Report whether verification is enabled.
+
+        Returns:
+            True if verification is enabled.
+        """
+        return bool(self.verify)
+
+    def status(self, message: str, level: int = 1):
+        """Write a message if verification is enabled.
+
+        Args:
+            message: status string
+        """
+        if self.verify >= level:
+            ts = datetime.datetime.now().astimezone()
+            print(f"({ts:%H:%M:%S}) {message}", file=sys.stderr)
+
+
+class Watch:
+    """Encapsulate a periodic status update.
+
+    The active message can be updated at will; a background thread will
+    periodically print the most recent status.
+    """
+
+    def __init__(self, interval: float):
+        """Initialize the object.
+
+        Args:
+            interval: interval in seconds for status updates
+        """
+        self.start = time.time()
+        self.interval = interval
+        self.status = "starting"
+        if interval:
+            self.thread = Thread(target=self.watcher)
+            self.thread.daemon = True
+            self.thread.start()
+
+    def update(self, status: str):
+        """Update status if appropriate.
+
+        Update the message to be printed at the next interval, if progress
+        reporting is enabled.
+
+        Args:
+            status: status string
+        """
+        self.status = status
+
+    def watcher(self):
+        """A worker thread to periodically write status messages."""
+
+        while True:
+            time.sleep(self.interval)
+            now = time.time()
+            delta = int(now - self.start)
+            hours, remainder = divmod(delta, 3600)
+            minutes, seconds = divmod(remainder, 60)
+            print(
+                f"[{hours:02d}:{minutes:02d}:{seconds:02d}] {self.status}",
+                file=sys.stderr,
+            )
+
+
+watcher: Optional[Watch] = None
+verifier: Optional[Verify] = None
+
+
+def index(root: str) -> str:
+    return f"cdmv7dev-{root}"
+
+
+def hits(
+    payload: dict[str, Any], fields: Optional[list[str]] = None
+) -> Iterator[dict[str, Any]]:
+    """Helper to iterate through OpenSearch query matches
+
+    Iteratively yields the "_source" of each hit. As a convenience, can
+    yield a sub-object of "_source" ... for example, specifying the
+    optional "fields" as ["metric_desc", "id"] will yield the equivalent of
+    hit["_source"]["metric_desc"]["id"]
+
+    Args:
+        payload: OpenSearch response payload
+        fields: Optional sub-fields of "_source"
+
+    Returns:
+        Yields each object from the "greatest hits" list
+    """
+    if "hits" not in payload:
+        raise Exception(f"Attempt to iterate hits for {payload}")
+    hits = payload.get("hits", {}).get("hits", [])
+    for h in hits:
+        source = h["_source"]
+        if fields:
+            for f in fields:
+                source = source[f]
+        yield source
+
+
+def diagnose(cdm: Elasticsearch, args: argparse.Namespace):
+    start = time.time()
+    runs = {}
+    watcher.update("finding runs")
+    rq = cdm.search(index=index("run"), sort=["run.begin:asc"], size=10000)
+    for r in hits(rq, ["run"]):
+        info = Info(r["id"], benchmark=r["benchmark"])
+        runs[info.id] = info
+        info.begin = r.get("begin", 0)
+        info.end = r.get("end", 0)
+        if not info.begin or not info.end:
+            info.errors["missing/bad run timestamps"] += 1
+            info.good = False
+        watcher.update(f"finding {info.id} tags")
+        tq = cdm.search(
+            index=index("tag"),
+            body={"query": {"bool": {"filter": {"term": {"run.id": info.id}}}}},
+            size=10000,
+        )
+        info.tags = {t["name"]: t["val"] for t in hits(tq, ["tag"])}
+        watcher.update(f"finding {info.id} iterations")
+        iq = cdm.search(
+            index=index("iteration"),
+            body={"query": {"bool": {"filter": {"term": {"run.id": info.id}}}}},
+            size=10000,
+        )
+        info.iterations = len(iq["hits"]["hits"])
+        info.primary.update((i["primary-metric"] for i in hits(iq, ["iteration"])))
+        watcher.update(f"finding {info.id} params")
+        pq = cdm.search(
+            index=index("param"),
+            body={"query": {"bool": {"filter": {"term": {"run.id": info.id}}}}},
+            size=10000,
+        )
+        for p in hits(pq):
+            i = p["iteration"]["num"]
+            param = p["param"]
+            info.params[i][param["arg"]] = param["val"]
+        watcher.update(f"finding {info.id} samples")
+        sq = cdm.search(
+            index=index("sample"),
+            body={"query": {"bool": {"filter": {"term": {"run.id": info.id}}}}},
+            size=10000,
+        )
+        info.samples = len(sq["hits"]["hits"])
+        watcher.update(f"finding {info.id} periods")
+        pq = cdm.search(
+            index=index("period"),
+            body={"query": {"bool": {"filter": {"term": {"run.id": info.id}}}}},
+            size=10000,
+        )
+        info.periods = len(pq["hits"]["hits"])
+        for p in hits(pq, ["period"]):
+            b = p.get("begin")
+            e = p.get("end")
+            if not b or not e:
+                info.errors[f"period {p['name']}: missing/bad timestamps"] += 1
+                info.good = False
+        watcher.update(f"finding {info.id} metrics")
+        mq = cdm.search(
+            index=index("metric_desc"),
+            body={"query": {"bool": {"filter": {"term": {"run.id": info.id}}}}},
+            size=10000,
+        )
+        metrics = {}
+        for m in hits(mq, ["metric_desc"]):
+            name = f"{m['source']}::{m['type']}"
+            metrics[m["id"]] = name
+            info.metrics[name] = 0
+        watcher.update(f"finding {info.id} metric data")
+        dq = cdm.search(
+            index=index("metric_data"),
+            body={
+                "query": {
+                    "bool": {
+                        "filter": {"terms": {"metric_desc.id": list(metrics.keys())}}
+                    }
+                }
+            },
+            size=100000,
+        )
+        for d in hits(dq):
+            id = d["metric_desc"]["id"]
+            data = d["metric_data"]
+            info.metrics[metrics[id]] += 1
+            if not data.get("begin") or not data.get("end"):
+                info.good = False
+                info.errors[f"metric {metrics[id]} sample missing timestamp"] += 1
+            if "duration" not in data:
+                info.good = False
+                info.errors[f"metric {metrics[id]} sample missing duration"] += 1
+            if "value" not in data:
+                info.good = False
+                info.errors[f"metric {metrics[id]} sample missing value"] += 1
+
+    watcher.update("generating report")
+    baddies = 0
+    marks = defaultdict(int)
+    first = True
+    for run in runs.values():
+        if not run.good:
+            baddies += 1
+        if (run.good and args.bad) or (not run.good and args.good):
+            continue
+        marks[run.benchmark] += 1
+        if args.id:
+            print(run.id)
+            continue
+        t = datetime.datetime.fromtimestamp(
+            int(run.begin) / 1000.0, tz=datetime.timezone.utc
+        )
+        if not args.detail:
+            if first:
+                first = False
+                print(
+                    f"{'Run ID':<36s} {'Benchmark':<10s} {'Start time':<16s} It Sa Pd Errors Primary"
+                )
+                print(
+                    f"{'':-<36s} {'':-<10s} {'':-<16s} {'':-<2s} {'':-<2s} {'':-<2s} {'':-<6s} {'':-<20s}"
+                )
+            print(
+                f"{run.id:36s} {run.benchmark:10s} {t:%Y-%m-%d %H:%M} "
+                f"{run.iterations:>2d} {run.samples:>2d} {run.periods:>2d} "
+                f"{sum(run.errors.values()) if run.errors else 0:>6d} "
+                f"{','.join(sorted(run.primary))}"
+            )
+            continue
+        print(f"Run {run.id} ({run.benchmark}@{t:%Y-%m-%d %H:%M})")
+        print(f"  Tags: {','.join(f'{k}={v}' for k, v in run.tags.items())}")
+        print(
+            f"  {run.iterations} iterations: primary metrics {', '.join(sorted(run.primary))}"
+        )
+        if run.params:
+            print("  Iteration params:")
+            for i, p in run.params.items():
+                for k, v in p.items():
+                    print(f"    {i:>2d} {k}={v}")
+        print(f"  {run.samples} samples")
+        print(f"  {run.periods} periods")
+        if run.metrics:
+            print("  Metrics:")
+            for m in sorted(run.metrics.keys()):
+                print(f"    {m:>15s}: {run.metrics[m]:5d}")
+        if not run.good:
+            print("  Errors:")
+            for e, i in run.errors.items():
+                print(f"    ({i:>3d}) {e!r}")
+    if args.summary or not args.id:
+        print(f"{len(runs)} runs analyzed: {baddies} are busted")
+        print("Benchmarks:")
+        for b in sorted(marks.keys()):
+            print(f"  {b:>10s}: {marks[b]:5d}")
+    print(f"Analysis took {time.time() - start:.3f} seconds")
+
+
+parser = argparse.ArgumentParser("diagnose")
+parser.add_argument("server", help="CDM v7 Opensearch server address")
+parser.add_argument(
+    "-b", "--bad-only", dest="bad", action="store_true", help="Only report bad runs"
+)
+parser.add_argument(
+    "-g", "--good-only", dest="good", action="store_true", help="Only report 
good runs" +) +parser.add_argument( + "-i", + "--id-only", + dest="id", + action="store_true", + help="Report just IDs (with good-only or bad-only)", +) +parser.add_argument( + "-d", "--detail", dest="detail", action="store_true", help="Report detail on runs" +) +parser.add_argument( + "-s", + "--summary", + dest="summary", + action="store_true", + help="Print summary statistics", +) +parser.add_argument( + "-v", + "--verbose", + dest="verbose", + action="count", + default=0, + help="Give progress feedback", +) +parser.add_argument("--version", action="version", version="%(prog)s 0.1") + +args = parser.parse_args() + +verifier = Verify(args.verbose) +watcher = Watch(60.0 / args.verbose if args.verbose else 0) + +try: + cdm = Elasticsearch(args.server) + diagnose(cdm, args) + sys.exit(0) +except Exception as exc: + print(f"Something smells odd: {str(exc)!r}") + sys.exit(1) diff --git a/frontend/README.md b/frontend/README.md index 0b01bbaf..99101f03 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -21,21 +21,21 @@ The `utils` directory has all helper/utility scripts. #### [`src/reducers`](src/reducers) -Contains functions that manage store via actions +Contains functions that manage store via actions -## Cloning and Running the Application Locally +## Cloning and Running the Application Locally -- Install [Node.js](https://nodejs.org) +- Install [Node.js](https://nodejs.org) - Clone the [CPT Dashboard code](https://github.com/cloud-bulldozer/cpt-dashboard) to a local file system - Install all the npm packages -Type the following command to install all npm packages +Type the following command to install all npm packages ```bash $ npm install ``` -In order to run the application use the following command +In order to run the application use the following command ```bash $ npm run dev @@ -56,12 +56,12 @@ Then, copy the `build` folder to the proper place on the server for deployment. ## Template -This application is based on v5 of PatternFly which is a production-ready UI solution for admin interfaces. For more information regarding the foundation and template of the application, please visit [PatternFly](https://www.patternfly.org/get-started/develop) +This application is based on v5 of PatternFly which is a production-ready UI solution for admin interfaces. 
For more information regarding the foundation and template of the application, please visit [PatternFly](https://www.patternfly.org/get-started/develop) ## Resources -- [Vite](https://vitejs.dev/guide/) +- [Vite](https://vitejs.dev/guide/) -- [ReactJS](https://reactjs.org/) +- [ReactJS](https://reactjs.org/) - [React-Redux](https://github.com/reduxjs/react-redux) diff --git a/frontend/src/App.js b/frontend/src/App.js deleted file mode 100644 index 4b8c6382..00000000 --- a/frontend/src/App.js +++ /dev/null @@ -1,58 +0,0 @@ -import React, {useEffect} from 'react'; -import '@patternfly/react-core/dist/styles/base.css'; - -import { - Page, - PageSection, - PageSectionVariants, -} from '@patternfly/react-core'; -import {fetchOCPJobsData, fetchCPTJobsData, fetchQuayJobsData, fetchTelcoJobsData} from "./store/Actions/ActionCreator"; -import {useDispatch} from "react-redux"; -import {Route, Switch, BrowserRouter as Router} from "react-router-dom"; -import {NavBar} from "./components/NavBar/NavBar"; -import {HomeView} from "./components/Home/HomeView"; -import {OCPHome} from './components/OCP/OCPHome'; -import {QuayHome} from './components/Quay/QuayHome'; -import {TelcoHome} from './components/Telco/TelcoHome'; - - -export const App = () => { - const dispatch = useDispatch() - - useEffect(() => { - const fetchData = async () =>{ - await dispatch(fetchOCPJobsData()) - await dispatch(fetchCPTJobsData()) - await dispatch(fetchQuayJobsData()) - await dispatch(fetchTelcoJobsData()) - } - fetchData() - }, [dispatch]) - - - - - return ( - - } - groupProps={{ - stickyOnBreakpoint: { default: 'top' }, - sticky: 'top' - }} - > - - - - - - - - - - - - ); -}; - -export default App diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index c5f48549..d93c960e 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -5,6 +5,7 @@ import * as APP_ROUTES from "./utils/routeConstants"; import { BrowserRouter, Route, Routes } from "react-router-dom"; import Home from "./components/templates/Home"; +import ILab from "./components/templates/ILab"; import MainLayout from "./containers/MainLayout"; import OCP from "./components/templates/OCP"; import Quay from "./components/templates/Quay"; @@ -26,6 +27,7 @@ function App() { } /> } /> } /> + } /> diff --git a/frontend/src/actions/filterActions.js b/frontend/src/actions/filterActions.js index 7f565887..6385b0bc 100644 --- a/frontend/src/actions/filterActions.js +++ b/frontend/src/actions/filterActions.js @@ -1,3 +1,4 @@ +import { fetchILabJobs, setIlabDateFilter } from "./ilabActions"; import { removeCPTAppliedFilters, setCPTAppliedFilters, @@ -76,6 +77,9 @@ export const setDateFilter = (date, key, navigation, currType) => { dispatch(setQuayDateFilter(date, key, navigation)); } else if (currType === "telco") { dispatch(setTelcoDateFilter(date, key, navigation)); + } else if (currType === "ilab") { + dispatch(setIlabDateFilter(date, key, navigation)); + dispatch(fetchILabJobs(true)); } }; diff --git a/frontend/src/actions/ilabActions.js b/frontend/src/actions/ilabActions.js new file mode 100644 index 00000000..347a5220 --- /dev/null +++ b/frontend/src/actions/ilabActions.js @@ -0,0 +1,311 @@ +import * as API_ROUTES from "@/utils/apiConstants"; +import * as TYPES from "./types.js"; + +import API from "@/utils/axiosInstance"; +import { appendQueryString } from "@/utils/helper"; +import { cloneDeep } from "lodash"; +import { showFailureToast } from "@/actions/toastActions"; + +export const fetchILabJobs = + (shouldStartFresh = false) => + async (dispatch, getState) 
=> { + try { + dispatch({ type: TYPES.LOADING }); + const { start_date, end_date, size, offset, results } = getState().ilab; + const response = await API.get(API_ROUTES.ILABS_JOBS_API_V1, { + params: { + ...(start_date && { start_date }), + ...(end_date && { end_date }), + ...(size && { size }), + ...(offset && { offset }), + }, + }); + if (response.status === 200) { + const startDate = response.data.startDate, + endDate = response.data.endDate; + dispatch({ + type: TYPES.SET_ILAB_JOBS_DATA, + payload: shouldStartFresh + ? response.data.results + : [...results, ...response.data.results], + }); + + dispatch({ + type: TYPES.SET_ILAB_DATE_FILTER, + payload: { + start_date: startDate, + end_date: endDate, + }, + }); + + dispatch({ + type: TYPES.SET_ILAB_TOTAL_ITEMS, + payload: response.data.total, + }); + dispatch({ + type: TYPES.SET_ILAB_OFFSET, + payload: response.data.next_offset, + }); + + dispatch(tableReCalcValues()); + } + } catch (error) { + dispatch(showFailureToast()); + } + dispatch({ type: TYPES.COMPLETED }); + }; +export const sliceIlabTableRows = + (startIdx, endIdx) => (dispatch, getState) => { + const results = [...getState().ilab.results]; + + dispatch({ + type: TYPES.SET_ILAB_INIT_JOBS, + payload: results.slice(startIdx, endIdx), + }); + }; +export const setIlabDateFilter = + (start_date, end_date, navigate) => (dispatch, getState) => { + const appliedFilters = getState().ilab.appliedFilters; + + dispatch({ + type: TYPES.SET_ILAB_DATE_FILTER, + payload: { + start_date, + end_date, + }, + }); + + appendQueryString({ ...appliedFilters, start_date, end_date }, navigate); + }; + +export const fetchMetricsInfo = (uid) => async (dispatch) => { + try { + dispatch({ type: TYPES.LOADING }); + const response = await API.get(`/api/v1/ilab/runs/${uid}/metrics`); + if (response.status === 200) { + if ( + response.data.constructor === Object && + Object.keys(response.data).length > 0 + ) { + dispatch({ + type: TYPES.SET_ILAB_METRICS, + payload: { uid, metrics: Object.keys(response.data) }, + }); + } + } + } catch (error) { + console.error(error); + dispatch(showFailureToast()); + } + dispatch({ type: TYPES.COMPLETED }); +}; + +export const fetchPeriods = (uid) => async (dispatch) => { + try { + dispatch({ type: TYPES.LOADING }); + const response = await API.get(`/api/v1/ilab/runs/${uid}/periods`); + if (response.status === 200) { + dispatch({ + type: TYPES.SET_ILAB_PERIODS, + payload: { uid, periods: response.data }, + }); + } + } catch (error) { + console.error( + `ERROR (${error?.response?.status}): ${JSON.stringify( + error?.response?.data + )}` + ); + dispatch(showFailureToast()); + } + dispatch({ type: TYPES.COMPLETED }); +}; + +export const fetchGraphData = + (uid, metric = null) => + async (dispatch, getState) => { + try { + const periods = getState().ilab.periods.find((i) => i.uid == uid); + const graphData = cloneDeep(getState().ilab.graphData); + const filterData = graphData.filter((i) => i.uid !== uid); + dispatch({ + type: TYPES.SET_ILAB_GRAPH_DATA, + payload: filterData, + }); + const copyData = cloneDeep(filterData); + dispatch({ type: TYPES.GRAPH_LOADING }); + let graphs = []; + periods?.periods?.forEach((p) => { + graphs.push({ metric: p.primary_metric, periods: [p.id] }); + if (metric) { + graphs.push({ + metric, + aggregate: true, + periods: [p.id], + }); + } + }); + const response = await API.post(`/api/v1/ilab/runs/multigraph`, { + run: uid, + name: `graph ${uid}`, + graphs, + }); + if (response.status === 200) { + copyData.push({ + uid, + data: response.data.data, + 
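+          // Plotly trace list for this run; entries are keyed by uid, so
+          // refreshing one run's graph leaves other runs' cached graphs intact.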
layout: response.data.layout, + }); + dispatch({ + type: TYPES.SET_ILAB_GRAPH_DATA, + payload: copyData, + }); + } + } catch (error) { + console.error( + `ERROR (${error?.response?.status}): ${JSON.stringify( + error?.response?.data + )}` + ); + dispatch(showFailureToast()); + } + dispatch({ type: TYPES.GRAPH_COMPLETED }); + }; + +export const handleMultiGraph = (uids) => async (dispatch, getState) => { + try { + const periods = getState().ilab.periods; + const pUids = periods.map((i) => i.uid); + + const missingPeriods = uids.filter(function (x) { + return pUids.indexOf(x) < 0; + }); + + await Promise.all( + missingPeriods.map(async (uid) => { + await dispatch(fetchPeriods(uid)); // Dispatch each item + }) + ); + + dispatch(fetchMultiGraphData(uids)); + } catch (error) { + console.error( + `ERROR (${error?.response?.status}): ${JSON.stringify( + error?.response?.data + )}` + ); + dispatch(showFailureToast()); + } +}; +export const fetchMultiGraphData = (uids) => async (dispatch, getState) => { + try { + dispatch({ type: TYPES.LOADING }); + const periods = getState().ilab.periods; + const filterPeriods = periods.filter((item) => uids.includes(item.uid)); + + let graphs = []; + uids.forEach(async (uid) => { + const periods = filterPeriods.find((i) => i.uid == uid); + periods?.periods?.forEach((p) => { + graphs.push({ + run: uid, + metric: p.primary_metric, + periods: [p.id], + }); + // graphs.push({ + // run: uid, + // metric, + // aggregate: true, + // periods: [p.id], + // }); + }); + }); + console.log(graphs); + const response = await API.post(`/api/v1/ilab/runs/multigraph`, { + name: "comparison", + relative: true, + graphs, + }); + if (response.status === 200) { + response.data.layout["showlegend"] = true; + response.data.layout["responsive"] = "true"; + response.data.layout["autosize"] = "true"; + response.data.layout["legend"] = { x: 0, y: 1.5 }; + const graphData = []; + graphData.push({ + data: response.data.data, + layout: response.data.layout, + }); + dispatch({ + type: TYPES.SET_ILAB_MULTIGRAPH_DATA, + payload: graphData, + }); + } + } catch (error) { + console.error( + `ERROR (${error?.response?.status}): ${JSON.stringify( + error?.response?.data + )}` + ); + dispatch(showFailureToast()); + } + dispatch({ type: TYPES.COMPLETED }); +}; + +export const setIlabPage = (pageNo) => ({ + type: TYPES.SET_ILAB_PAGE, + payload: pageNo, +}); + +export const setIlabPageOptions = (page, perPage) => ({ + type: TYPES.SET_ILAB_PAGE_OPTIONS, + payload: { page, perPage }, +}); + +export const checkIlabJobs = (newPage) => (dispatch, getState) => { + const results = cloneDeep(getState().ilab.results); + const { totalItems, perPage } = getState().ilab; + + const startIdx = (newPage - 1) * perPage; + const endIdx = newPage * perPage; + + if ( + (typeof results[startIdx] === "undefined" || + typeof results[endIdx] === "undefined") && + results.length < totalItems + ) { + dispatch(fetchILabJobs()); + } +}; + +export const setSelectedMetrics = (id, metrics) => (dispatch, getState) => { + const metrics_selected = cloneDeep(getState().ilab.metrics_selected); + metrics_selected[id] = metrics; + dispatch({ + type: TYPES.SET_ILAB_SELECTED_METRICS, + payload: metrics_selected, + }); +}; + +export const tableReCalcValues = () => (dispatch, getState) => { + const { page, perPage } = getState().ilab; + + const startIdx = page !== 1 ? (page - 1) * perPage : 0; + const endIdx = page !== 1 ? 
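+      // bounds handed to Array.prototype.slice (exclusive end) for this page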
page * perPage - 1 : perPage; + dispatch(sliceIlabTableRows(startIdx, endIdx)); + dispatch(getMetaRowdId()); +}; + +export const getMetaRowdId = () => (dispatch, getState) => { + const tableData = getState().ilab.tableData; + const metaId = tableData.map((item) => `metadata-toggle-${item.id}`); + dispatch(setMetaRowExpanded(metaId)); +}; +export const toggleComparisonSwitch = () => ({ + type: TYPES.TOGGLE_COMPARISON_SWITCH, +}); + +export const setMetaRowExpanded = (expandedItems) => ({ + type: TYPES.SET_EXPANDED_METAROW, + payload: expandedItems, +}); diff --git a/frontend/src/actions/paginationActions.js b/frontend/src/actions/paginationActions.js index 80a7dff1..1717a82a 100644 --- a/frontend/src/actions/paginationActions.js +++ b/frontend/src/actions/paginationActions.js @@ -3,9 +3,17 @@ import { setCPTPageOptions, sliceCPTTableRows, } from "./homeActions"; +import { + setIlabPage, + setIlabPageOptions, + sliceIlabTableRows, +} from "./ilabActions"; import { setOCPPage, setOCPPageOptions, sliceOCPTableRows } from "./ocpActions"; import { setQuayPage, setQuayPageOptions } from "./quayActions"; import { setTelcoPage, setTelcoPageOptions } from "./telcoActions"; + +import { checkIlabJobs } from "./ilabActions"; + export const setPage = (newPage, currType) => (dispatch) => { if (currType === "cpt") { dispatch(setCPTPage(newPage)); @@ -15,6 +23,8 @@ export const setPage = (newPage, currType) => (dispatch) => { dispatch(setQuayPage(newPage)); } else if (currType === "telco") { dispatch(setTelcoPage(newPage)); + } else if (currType === "ilab") { + dispatch(setIlabPage(newPage)); } }; @@ -27,6 +37,8 @@ export const setPageOptions = (newPage, newPerPage, currType) => (dispatch) => { dispatch(setQuayPageOptions(newPage, newPerPage)); } else if (currType === "telco") { dispatch(setTelcoPageOptions(newPage, newPerPage)); + } else if (currType === "ilab") { + dispatch(setIlabPageOptions(newPage, newPerPage)); } }; @@ -35,5 +47,11 @@ export const sliceTableRows = (startIdx, endIdx, currType) => (dispatch) => { dispatch(sliceCPTTableRows(startIdx, endIdx)); } else if (currType === "ocp") { dispatch(sliceOCPTableRows(startIdx, endIdx)); + } else if (currType === "ilab") { + dispatch(sliceIlabTableRows(startIdx, endIdx)); } }; + +export const fetchNextJobs = (newPage) => (dispatch) => { + dispatch(checkIlabJobs(newPage)); +}; diff --git a/frontend/src/actions/types.js b/frontend/src/actions/types.js index 1804cf21..f7e21fec 100644 --- a/frontend/src/actions/types.js +++ b/frontend/src/actions/types.js @@ -77,3 +77,18 @@ export const SET_TELCO_SELECTED_FILTERS = "SET_TELCO_SELECTED_FILTERS"; export const SET_TELCO_SUMMARY = "SET_TELCO_SUMMARY"; export const SET_TELCO_COLUMNS = "SET_TELCO_COLUMNS"; export const SET_TELCO_GRAPH_DATA = "SET_TELCO_GRAPH_DATA"; +/* ILAB JOBS */ +export const SET_ILAB_JOBS_DATA = "SET_ILAB_JOBS_DATA"; +export const SET_ILAB_DATE_FILTER = "SET_ILAB_DATE_FILTER"; +export const SET_ILAB_GRAPH_DATA = "SET_ILAB_GRAPH_DATA"; +export const SET_ILAB_MULTIGRAPH_DATA = "SET_ILAB_MULTIGRAPH_DATA"; +export const SET_ILAB_TOTAL_ITEMS = "SET_ILAB_TOTAL_ITEMS"; +export const SET_ILAB_OFFSET = "SET_ILAB_OFFSET"; +export const SET_ILAB_PAGE = "SET_ILAB_PAGE"; +export const SET_ILAB_PAGE_OPTIONS = "SET_ILAB_PAGE_OPTIONS"; +export const SET_ILAB_METRICS = "SET_ILAB_METRICS"; +export const SET_ILAB_SELECTED_METRICS = "SET_ILAB_SELECTED_METRICS"; +export const SET_ILAB_PERIODS = "SET_ILAB_PERIODS"; +export const SET_ILAB_INIT_JOBS = "SET_ILAB_INIT_JOBS"; +export const TOGGLE_COMPARISON_SWITCH = 
"TOGGLE_COMPARISON_SWITCH"; +export const SET_EXPANDED_METAROW = "SET_EXPANDED_METAROW"; diff --git a/frontend/src/assets/constants/SidemenuConstants.js b/frontend/src/assets/constants/SidemenuConstants.js index bc04fd52..e65a2103 100644 --- a/frontend/src/assets/constants/SidemenuConstants.js +++ b/frontend/src/assets/constants/SidemenuConstants.js @@ -2,3 +2,4 @@ export const HOME_NAV = "home"; export const QUAY_NAV = "quay"; export const OCP_NAV = "ocp"; export const TELCO_NAV = "telco"; +export const ILAB_NAV = "ilab"; diff --git a/frontend/src/components/molecules/ExpandedRow/index.jsx b/frontend/src/components/molecules/ExpandedRow/index.jsx index 981d5660..8fcc2d48 100644 --- a/frontend/src/components/molecules/ExpandedRow/index.jsx +++ b/frontend/src/components/molecules/ExpandedRow/index.jsx @@ -42,7 +42,7 @@ const RowContent = (props) => { }, []); return ( - + {content.map((unit) => ( diff --git a/frontend/src/components/molecules/SideMenuOptions/index.jsx b/frontend/src/components/molecules/SideMenuOptions/index.jsx index 48bed8de..17a00160 100644 --- a/frontend/src/components/molecules/SideMenuOptions/index.jsx +++ b/frontend/src/components/molecules/SideMenuOptions/index.jsx @@ -28,6 +28,11 @@ const sideMenuOptions = [ key: "telco", displayName: "Telco", }, + { + id: CONSTANTS.ILAB_NAV, + key: "ilab", + displayName: "ILAB", + }, ]; const MenuOptions = () => { diff --git a/frontend/src/components/organisms/Pagination/index.jsx b/frontend/src/components/organisms/Pagination/index.jsx index 7b316a21..deb8d8fe 100644 --- a/frontend/src/components/organisms/Pagination/index.jsx +++ b/frontend/src/components/organisms/Pagination/index.jsx @@ -1,5 +1,6 @@ import { Pagination, PaginationVariant } from "@patternfly/react-core"; import { + fetchNextJobs, setPage, setPageOptions, sliceTableRows, @@ -13,6 +14,7 @@ const RenderPagination = (props) => { const dispatch = useDispatch(); const perPageOptions = [ + { title: "10", value: 10 }, { title: "25", value: 25 }, { title: "50", value: 50 }, { title: "100", value: 100 }, @@ -21,6 +23,7 @@ const RenderPagination = (props) => { const onSetPage = useCallback( (_evt, newPage, _perPage, startIdx, endIdx) => { dispatch(setPage(newPage, props.type)); + dispatch(sliceTableRows(startIdx, endIdx, props.type)); }, [dispatch, props.type] @@ -28,11 +31,17 @@ const RenderPagination = (props) => { const onPerPageSelect = useCallback( (_evt, newPerPage, newPage, startIdx, endIdx) => { dispatch(setPageOptions(newPage, newPerPage, props.type)); + dispatch(sliceTableRows(startIdx, endIdx, props.type)); }, [dispatch, props.type] ); + const checkAndFetch = (_evt, newPage) => { + if (props.type === "ilab") { + dispatch(fetchNextJobs(newPage)); + } + }; return ( { perPage={props.perPage} page={props.page} variant={PaginationVariant.bottom} + onNextClick={checkAndFetch} perPageOptions={perPageOptions} onSetPage={onSetPage} onPerPageSelect={onPerPageSelect} + onPageInput={checkAndFetch} /> ); }; diff --git a/frontend/src/components/organisms/TableFilters/index.jsx b/frontend/src/components/organisms/TableFilters/index.jsx index c5f5ae62..cec3fee0 100644 --- a/frontend/src/components/organisms/TableFilters/index.jsx +++ b/frontend/src/components/organisms/TableFilters/index.jsx @@ -5,6 +5,7 @@ import "./index.less"; import { Chip, ChipGroup, + Switch, Toolbar, ToolbarContent, ToolbarItem, @@ -39,6 +40,8 @@ const TableFilter = (props) => { setColumns, selectedFilters, updateSelectedFilter, + onSwitchChange, + isSwitchChecked, } = props; const category = @@ -66,7 
+69,7 @@ const TableFilter = (props) => { setDateFilter(date, key, navigation, type); }; const endDateChangeHandler = (date, key) => { - setDateFilter(key, date, navigation, type); + setDateFilter(date, key, navigation, type); }; return ( @@ -123,8 +126,21 @@ const TableFilter = (props) => { )} + {type === "ilab" && ( + + + + + + )} - {Object.keys(appliedFilters).length > 0 && + {appliedFilters && + Object.keys(appliedFilters).length > 0 && Object.keys(appliedFilters).map((key) => ( {getFilterName(key)} : @@ -153,5 +169,7 @@ TableFilter.propTypes = { selectedFilters: PropTypes.array, updateSelectedFilter: PropTypes.func, navigation: PropTypes.func, + isSwitchChecked: PropTypes.bool, + onSwitchChange: PropTypes.func, }; export default TableFilter; diff --git a/frontend/src/components/organisms/TableFilters/index.less b/frontend/src/components/organisms/TableFilters/index.less index b100a012..1a479703 100644 --- a/frontend/src/components/organisms/TableFilters/index.less +++ b/frontend/src/components/organisms/TableFilters/index.less @@ -11,4 +11,8 @@ .to-text { padding: 5px 0; } + #comparison-switch { + margin-left: auto; + align-content: center; + } } \ No newline at end of file diff --git a/frontend/src/components/templates/ILab/ILabGraph.jsx b/frontend/src/components/templates/ILab/ILabGraph.jsx new file mode 100644 index 00000000..c41300ba --- /dev/null +++ b/frontend/src/components/templates/ILab/ILabGraph.jsx @@ -0,0 +1,44 @@ +import Plot from "react-plotly.js"; +import PropType from "prop-types"; +import { cloneDeep } from "lodash"; +import { uid } from "@/utils/helper"; +import { useSelector } from "react-redux"; + +const ILabGraph = (props) => { + const { item } = props; + const isGraphLoading = useSelector((state) => state.loading.isGraphLoading); + const { graphData } = useSelector((state) => state.ilab); + + const graphDataCopy = cloneDeep(graphData); + + const getGraphData = (id) => { + const data = graphDataCopy?.filter((a) => a.uid === id); + return data; + }; + const hasGraphData = (uuid) => { + const hasData = getGraphData(uuid).length > 0; + + return hasData; + }; + + return ( + <> + {hasGraphData(item.id) ? ( + + ) : isGraphLoading && !hasGraphData(item.id) ? ( +
+ ) : ( + <> + )} + + ); +}; + +ILabGraph.propTypes = { + item: PropType.object, +}; +export default ILabGraph; diff --git a/frontend/src/components/templates/ILab/IlabCompareComponent.jsx b/frontend/src/components/templates/ILab/IlabCompareComponent.jsx new file mode 100644 index 00000000..c062be13 --- /dev/null +++ b/frontend/src/components/templates/ILab/IlabCompareComponent.jsx @@ -0,0 +1,117 @@ +import "./index.less"; + +import { + Button, + Menu, + MenuContent, + MenuItem, + MenuItemAction, + MenuList, + Title, +} from "@patternfly/react-core"; +import { useDispatch, useSelector } from "react-redux"; + +import { InfoCircleIcon } from "@patternfly/react-icons"; +import Plot from "react-plotly.js"; +import PropTypes from "prop-types"; +import RenderPagination from "@/components/organisms/Pagination"; +import { cloneDeep } from "lodash"; +import { handleMultiGraph } from "@/actions/ilabActions.js"; +import { uid } from "@/utils/helper"; +import { useState } from "react"; + +const IlabCompareComponent = () => { + // const { data } = props; + const { page, perPage, totalItems, tableData } = useSelector( + (state) => state.ilab + ); + const dispatch = useDispatch(); + const [selectedItems, setSelectedItems] = useState([]); + const { multiGraphData } = useSelector((state) => state.ilab); + const isGraphLoading = useSelector((state) => state.loading.isGraphLoading); + const graphDataCopy = cloneDeep(multiGraphData); + + const onSelect = (_event, itemId) => { + const item = itemId; + if (selectedItems.includes(item)) { + setSelectedItems(selectedItems.filter((id) => id !== item)); + } else { + setSelectedItems([...selectedItems, item]); + } + }; + const dummy = () => { + dispatch(handleMultiGraph(selectedItems)); + }; + return ( +
+
+ + Metrics + + + + + + {tableData.map((item) => { + return ( + } + actionId="code" + onClick={() => console.log("clicked on code icon")} + aria-label="Code" + /> + } + > + {`${new Date(item.begin_date).toLocaleDateString()} ${ + item.primary_metrics[0] + }`} + + ); + })} + + + + +
+
+ {isGraphLoading ? ( +
+ ) : graphDataCopy?.length > 0 && + graphDataCopy?.[0]?.data?.length > 0 ? ( +
+ +
+ ) : ( +
No data to compare
+ )} +
+
+ ); +}; + +IlabCompareComponent.propTypes = { + data: PropTypes.array, +}; +export default IlabCompareComponent; diff --git a/frontend/src/components/templates/ILab/IlabExpandedRow.jsx b/frontend/src/components/templates/ILab/IlabExpandedRow.jsx new file mode 100644 index 00000000..bcdbcc33 --- /dev/null +++ b/frontend/src/components/templates/ILab/IlabExpandedRow.jsx @@ -0,0 +1,148 @@ +import { + Accordion, + AccordionContent, + AccordionItem, + AccordionToggle, + Card, + CardBody, +} from "@patternfly/react-core"; +import { useDispatch, useSelector } from "react-redux"; + +import ILabGraph from "./ILabGraph"; +import MetaRow from "./MetaRow"; +import MetricsSelect from "./MetricsDropdown"; +import PropTypes from "prop-types"; +import { setMetaRowExpanded } from "@/actions/ilabActions"; +import { uid } from "@/utils/helper"; + +const IlabRowContent = (props) => { + const { item } = props; + const dispatch = useDispatch(); + const { metaRowExpanded } = useSelector((state) => state.ilab); + + const onToggle = (id) => { + const index = metaRowExpanded.indexOf(id); + const newExpanded = + index >= 0 + ? [ + ...metaRowExpanded.slice(0, index), + ...metaRowExpanded.slice(index + 1, metaRowExpanded.length), + ] + : [...metaRowExpanded, id]; + + dispatch(setMetaRowExpanded(newExpanded)); + }; + return ( + + + { + onToggle(`metadata-toggle-${item.id}`); + }} + isExpanded={metaRowExpanded.includes(`metadata-toggle-${item.id}`)} + id={`metadata-toggle-${item.id}`} + > + Metadata + + + +
+ + + + + + + + + + + + + + {item.iterations.length > 1 && ( + + { + onToggle(`iterations-toggle-${item.id}`); + }} + isExpanded={metaRowExpanded.includes( + `iterations-toggle-${item.id}` + )} + id={`iterations-toggle-${item.id}`} + > + {`Unique parameters for ${item.iterations.length} Iterations`} + + + {item.iterations.map((i) => ( + !(i[0] in item.params) + )} + /> + ))} + + + )} + + +
+
+
+ + { + onToggle(`graph-toggle-${item.id}`); + }} + isExpanded={metaRowExpanded.includes(`graph-toggle-${item.id}`)} + id={`graph-toggle-${item.id}`} + > + Metrics & Graph + + +
Metrics:
+ +
+ +
+
+
+
+ ); +}; +IlabRowContent.propTypes = { + item: PropTypes.object, +}; +export default IlabRowContent; diff --git a/frontend/src/components/templates/ILab/MetaRow.jsx b/frontend/src/components/templates/ILab/MetaRow.jsx new file mode 100644 index 00000000..c196e79f --- /dev/null +++ b/frontend/src/components/templates/ILab/MetaRow.jsx @@ -0,0 +1,40 @@ +import { Table, Tbody, Th, Thead, Tr } from "@patternfly/react-table"; + +import Proptypes from "prop-types"; +import { Title } from "@patternfly/react-core"; +import { uid } from "@/utils/helper"; + +const MetaRow = (props) => { + const { metadata, heading } = props; + return ( + <> + + {heading} + + + + + + + + + + {metadata.map((item) => ( + + + + + ))} + +
+ Key + Value
{item[0]}{item[1]}
+ + ); +}; + +MetaRow.propTypes = { + heading: Proptypes.string, + metadata: Proptypes.array, +}; +export default MetaRow; diff --git a/frontend/src/components/templates/ILab/MetricsDropdown.jsx b/frontend/src/components/templates/ILab/MetricsDropdown.jsx new file mode 100644 index 00000000..04568f0f --- /dev/null +++ b/frontend/src/components/templates/ILab/MetricsDropdown.jsx @@ -0,0 +1,93 @@ +import { + MenuToggle, + Select, + SelectList, + SelectOption, + Skeleton +} from "@patternfly/react-core"; +import { fetchGraphData, setSelectedMetrics } from "@/actions/ilabActions"; +import { useDispatch, useSelector } from "react-redux"; + +import PropTypes from "prop-types"; +import { cloneDeep } from "lodash"; +import { uid } from "@/utils/helper"; +import { useState } from "react"; + +const MetricsSelect = (props) => { + const { metrics, metrics_selected } = useSelector((state) => state.ilab); + const { item } = props; + /* Metrics select */ + const [isOpen, setIsOpen] = useState(false); + const dispatch = useDispatch(); + // const [selected, setSelected] = useState("Select a value"); + + const toggle1 = (toggleRef, selected) => ( + + {selected} + + ); + + const onToggleClick = () => { + setIsOpen(!isOpen); + }; + const onSelect = (_event, value) => { + console.log("selected", value); + const run = value.split("*"); + //setSelected(run[1].trim()); + dispatch(setSelectedMetrics(run[0].trim(), run[1].trim())); + setIsOpen(false); + dispatch(fetchGraphData(run[0].trim(), run[1].trim())); + }; + const metricsDataCopy = cloneDeep(metrics); + + const getMetricsData = (id) => { + const data = metricsDataCopy?.filter((a) => a.uid === id); + return data; + }; + const hasMetricsData = (uuid) => { + const hasData = getMetricsData(uuid).length > 0; + + return hasData; + }; + /* Metrics select */ + return ( + <> + {hasMetricsData(item.id) ? ( + + ): + + } + + ); +}; + +MetricsSelect.propTypes = { + item: PropTypes.object, +}; +export default MetricsSelect; diff --git a/frontend/src/components/templates/ILab/StatusCell.jsx b/frontend/src/components/templates/ILab/StatusCell.jsx new file mode 100644 index 00000000..a4bd208f --- /dev/null +++ b/frontend/src/components/templates/ILab/StatusCell.jsx @@ -0,0 +1,24 @@ +import { + CheckCircleIcon, + ExclamationCircleIcon, +} from "@patternfly/react-icons"; + +import { Label } from "@patternfly/react-core"; +import Proptype from "prop-types"; + +const StatusCell = (props) => { + return props.value?.toLowerCase() === "pass" ? 
( + + ) : ( + + ); +}; +StatusCell.propTypes = { + value: Proptype.string, +}; + +export default StatusCell; diff --git a/frontend/src/components/templates/ILab/index.jsx b/frontend/src/components/templates/ILab/index.jsx new file mode 100644 index 00000000..d728b44a --- /dev/null +++ b/frontend/src/components/templates/ILab/index.jsx @@ -0,0 +1,164 @@ +import "./index.less"; + +import { + ExpandableRowContent, + Table, + Tbody, + Td, + Th, + Thead, + Tr, +} from "@patternfly/react-table"; +import { + fetchILabJobs, + fetchMetricsInfo, + fetchPeriods, + setIlabDateFilter, + toggleComparisonSwitch, +} from "@/actions/ilabActions"; +import { formatDateTime, uid } from "@/utils/helper"; +import { useDispatch, useSelector } from "react-redux"; +import { useEffect, useState } from "react"; +import { useNavigate, useSearchParams } from "react-router-dom"; + +import IlabCompareComponent from "./IlabCompareComponent"; +import IlabRowContent from "./IlabExpandedRow"; +import RenderPagination from "@/components/organisms/Pagination"; +import StatusCell from "./StatusCell"; +import TableFilter from "@/components/organisms/TableFilters"; + +const ILab = () => { + const dispatch = useDispatch(); + const navigate = useNavigate(); + const [searchParams] = useSearchParams(); + + const { + start_date, + end_date, + comparisonSwitch, + tableData, + page, + perPage, + totalItems, + } = useSelector((state) => state.ilab); + const [expandedResult, setExpandedResult] = useState([]); + + const isResultExpanded = (res) => expandedResult?.includes(res); + const setExpanded = async (run, isExpanding = true) => { + setExpandedResult((prevExpanded) => { + const otherExpandedRunNames = prevExpanded.filter((r) => r !== run.id); + return isExpanding + ? [...otherExpandedRunNames, run.id] + : otherExpandedRunNames; + }); + if (isExpanding) { + dispatch(fetchPeriods(run.id)); + dispatch(fetchMetricsInfo(run.id)); + } + }; + + useEffect(() => { + if (searchParams.size > 0) { + // date filter is set apart + const startDate = searchParams.get("start_date"); + const endDate = searchParams.get("end_date"); + + searchParams.delete("start_date"); + searchParams.delete("end_date"); + const params = Object.fromEntries(searchParams); + const obj = {}; + for (const key in params) { + obj[key] = params[key].split(","); + } + dispatch(setIlabDateFilter(startDate, endDate, navigate)); + } + }, []); + + useEffect(() => { + dispatch(fetchILabJobs()); + }, [dispatch]); + + const columnNames = { + benchmark: "Benchmark", + email: "Email", + name: "Name", + source: "Source", + metric: "Metric", + begin_date: "Start Date", + end_date: "End Date", + status: "Status", + }; + + const onSwitchChange = () => { + dispatch(toggleComparisonSwitch()); + }; + return ( + <> + + {comparisonSwitch ? ( + + ) : ( + <> + + + + + + + + + + + {tableData.map((item, rowIndex) => ( + <> + + + + + + + + + + + ))} + +
+ {columnNames.metric}{columnNames.begin_date}{columnNames.end_date}{columnNames.status}
+ setExpanded(item, !isResultExpanded(item.id)), + expandId: `expandId-${uid()}`, + }} + /> + + {item.primary_metrics[0]}{formatDateTime(item.begin_date)}{formatDateTime(item.end_date)} + +
+ + + +
+ + + )} + + ); +}; + +export default ILab; diff --git a/frontend/src/components/templates/ILab/index.less b/frontend/src/components/templates/ILab/index.less new file mode 100644 index 00000000..399c6c77 --- /dev/null +++ b/frontend/src/components/templates/ILab/index.less @@ -0,0 +1,41 @@ +.pf-v5-c-accordion__expandable-content-body { + display: block; +} +.metadata-wrapper { + display: flex; + flex-direction: row; + margin-bottom: 1vw; + .metadata-card { + flex: 1; /* additionally, equal width */ + padding: 1em; + margin-right: 1.5vw; + } +} +.comparison-container { + display: flex; + width: 100%; + height: 80%; + .metrics-container { + width: 40%; + padding: 10px; + .compare-btn { + margin: 2vh 0; + } + .pf-v5-c-menu { + height: 75%; + box-shadow: unset; + } + } + .chart-container { + width: 80%; + .js-plotly-plot { + width: 100%; + height: 100%; + overflow-x: auto; + overflow-y: visible; + } + } + .title { + margin-bottom: 2vh; + } +} diff --git a/frontend/src/reducers/ilabReducer.js b/frontend/src/reducers/ilabReducer.js new file mode 100644 index 00000000..f8cd5b0d --- /dev/null +++ b/frontend/src/reducers/ilabReducer.js @@ -0,0 +1,70 @@ +import * as TYPES from "@/actions/types"; + +const initialState = { + results: [], + start_date: "", + end_date: "", + graphData: [], + multiGraphData: [], + totalItems: 0, + page: 1, + perPage: 10, + size: 10, + offset: 0, + metrics: [], + periods: [], + metrics_selected: {}, + tableData: [], + comparisonSwitch: false, + metaRowExpanded: [], +}; +const ILabReducer = (state = initialState, action = {}) => { + const { type, payload } = action; + switch (type) { + case TYPES.SET_ILAB_JOBS_DATA: + return { + ...state, + results: payload, + }; + case TYPES.SET_ILAB_DATE_FILTER: + return { + ...state, + start_date: payload.start_date, + end_date: payload.end_date, + }; + case TYPES.SET_ILAB_TOTAL_ITEMS: + return { + ...state, + totalItems: payload, + }; + case TYPES.SET_ILAB_OFFSET: + return { ...state, offset: payload }; + case TYPES.SET_ILAB_PAGE: + return { ...state, page: payload }; + case TYPES.SET_ILAB_PAGE_OPTIONS: + return { ...state, page: payload.page, perPage: payload.perPage }; + case TYPES.SET_ILAB_METRICS: + return { ...state, metrics: [...state.metrics, payload] }; + case TYPES.SET_ILAB_PERIODS: + return { ...state, periods: [...state.periods, payload] }; + case TYPES.SET_ILAB_SELECTED_METRICS: + return { + ...state, + metrics_selected: payload, + }; + case TYPES.SET_ILAB_GRAPH_DATA: + return { ...state, graphData: payload }; + case TYPES.SET_ILAB_INIT_JOBS: + return { ...state, tableData: payload }; + case TYPES.SET_ILAB_MULTIGRAPH_DATA: + return { ...state, multiGraphData: payload }; + case TYPES.TOGGLE_COMPARISON_SWITCH: + return { ...state, comparisonSwitch: !state.comparisonSwitch }; + case TYPES.SET_EXPANDED_METAROW: + return { ...state, metaRowExpanded: payload }; + default: + return state; + } +}; + +export default ILabReducer; diff --git a/frontend/src/reducers/index.js b/frontend/src/reducers/index.js index 1fb4c555..43970170 100644 --- a/frontend/src/reducers/index.js +++ b/frontend/src/reducers/index.js @@ -1,4 +1,5 @@ import HomeReducer from "./homeReducer"; +import ILabReducer from "./ilabReducer"; import LoadingReducer from "./loadingReducer"; import OCPReducer from "./ocpReducer"; import QuayReducer from "./quayReducer"; @@ -15,4 +16,5 @@ export default combineReducers({ ocp: OCPReducer, quay: QuayReducer, telco: TelcoReducer, + ilab: ILabReducer, }); diff --git a/frontend/src/reducers/loadingReducer.js 
b/frontend/src/reducers/loadingReducer.js index 496a4e65..52f0c732 100644 --- a/frontend/src/reducers/loadingReducer.js +++ b/frontend/src/reducers/loadingReducer.js @@ -7,7 +7,7 @@ import { const initialState = { isLoading: false, - isGraphLoading: true, + isGraphLoading: false, }; const LoadingReducer = (state = initialState, action = {}) => { diff --git a/frontend/src/store/reducers/InitialData.js b/frontend/src/store/reducers/InitialData.js deleted file mode 100644 index 80503b3c..00000000 --- a/frontend/src/store/reducers/InitialData.js +++ /dev/null @@ -1,181 +0,0 @@ - -export const OCP_INITIAL_DATA = { - initialState: true, - success: 0, - failure: 0, - total: 0, - others: 0, - duration:0, - benchmarks: ["All"], - versions: ["All"], - workers: ["All"], - ciSystems: ["All"], - networkTypes: ["All"], - jobTypes: ["All"], - rehearses: ["All"], - allIpsec: ["All"], - allFips: ["All"], - allEncrypted: ["All"], - encryptionTypes: ["All"], - allPublish: ["All"], - computeArchs: ["All"], - controlPlaneArchs: ["All"], - jobStatuses: ["All"], - selectedBenchmark: "All", - selectedVersion: "All", - selectedPlatform: "All", - selectedWorkerCount: "All", - selectedNetworkType: "All", - selectedCiSystem: "All", - selectedJobType: "All", - selectedRehearse: "All", - selectedIpsec: "All", - selectedFips: "All", - selectedEncrypted: "All", - selectedEncryptionType: "All", - selectedPublish: "All", - selectedComputeArch: "All", - selectedControlPlaneArch: "All", - selectedJobStatus: "All", - waitForUpdate: false, - platforms: ["All"], - copyData: [], - data: [], - updatedTime: 'Loading', - error: null, - startDate: '', - endDate: '', - tableData : [{ name: "Benchmark", value: "benchmark" }, - {name:"Release Stream", value: "releaseStream"}, - {name:"Build", value: "build"}, - {name: "Worker Count", value: "workerNodesCount"}, - {name: "Start Date", value: "startDate"}, - {name: "End Date", value: "endDate"}, - {name: "Status", value: "jobStatus"}], -} - -export const QUAY_INITIAL_DATA = { - initialState: true, - success: 0, - failure: 0, - total: 0, - others: 0, - duration:0, - ciSystems: ["All"], - platforms: ["All"], - benchmarks: ["All"], - releaseStreams: ["All"], - workers: ["All"], - hitSizes: ["All"], - concurrencies: ["All"], - imagePushPulls: ["All"], - selectedCiSystem: "All", - selectedPlatform: "All", - selectedBenchmark: "All", - selectedReleaseStream: "All", - selectedWorkerCount: "All", - selectedHitSize: "All", - selectedConcurrency: "All", - selectedImagePushPulls: "All", - waitForUpdate: false, - copyData: [], - data: [], - updatedTime: 'Loading', - error: null, - startDate: '', - endDate: '', - tableData : [{ name: "Benchmark", value: "benchmark" }, - {name:"Release Stream", value: "releaseStream"}, - {name:"Platform", value: "platform"}, - {name: "Worker Count", value: "workerNodesCount"}, - {name: "Start Date", value: "startDate"}, - {name: "End Date", value: "endDate"}, - {name: "Status", value: "jobStatus"}], -} - -export const TELCO_INITIAL_DATA = { - initialState: true, - success: 0, - failure: 0, - total: 0, - others: 0, - duration:0, - ciSystems: ["All"], - benchmarks: ["All"], - versions: ["All"], - releaseStreams: ["All"], - formals: ["All"], - nodeNames: ["All"], - cpus: ["All"], - selectedCiSystem: "All", - selectedBenchmark: "All", - selectedVersion: "All", - selectedReleaseStream: "All", - selectedFormal: "All", - selectedCpu: "All", - selectedNodeName: "All", - waitForUpdate: false, - copyData: [], - data: [], - updatedTime: 'Loading', - error: null, - startDate: '', 
diff --git a/frontend/src/store/reducers/InitialData.js b/frontend/src/store/reducers/InitialData.js
deleted file mode 100644
index 80503b3c..00000000
--- a/frontend/src/store/reducers/InitialData.js
+++ /dev/null
@@ -1,181 +0,0 @@
-
-export const OCP_INITIAL_DATA = {
-  initialState: true,
-  success: 0,
-  failure: 0,
-  total: 0,
-  others: 0,
-  duration:0,
-  benchmarks: ["All"],
-  versions: ["All"],
-  workers: ["All"],
-  ciSystems: ["All"],
-  networkTypes: ["All"],
-  jobTypes: ["All"],
-  rehearses: ["All"],
-  allIpsec: ["All"],
-  allFips: ["All"],
-  allEncrypted: ["All"],
-  encryptionTypes: ["All"],
-  allPublish: ["All"],
-  computeArchs: ["All"],
-  controlPlaneArchs: ["All"],
-  jobStatuses: ["All"],
-  selectedBenchmark: "All",
-  selectedVersion: "All",
-  selectedPlatform: "All",
-  selectedWorkerCount: "All",
-  selectedNetworkType: "All",
-  selectedCiSystem: "All",
-  selectedJobType: "All",
-  selectedRehearse: "All",
-  selectedIpsec: "All",
-  selectedFips: "All",
-  selectedEncrypted: "All",
-  selectedEncryptionType: "All",
-  selectedPublish: "All",
-  selectedComputeArch: "All",
-  selectedControlPlaneArch: "All",
-  selectedJobStatus: "All",
-  waitForUpdate: false,
-  platforms: ["All"],
-  copyData: [],
-  data: [],
-  updatedTime: 'Loading',
-  error: null,
-  startDate: '',
-  endDate: '',
-  tableData : [{ name: "Benchmark", value: "benchmark" },
-    {name:"Release Stream", value: "releaseStream"},
-    {name:"Build", value: "build"},
-    {name: "Worker Count", value: "workerNodesCount"},
-    {name: "Start Date", value: "startDate"},
-    {name: "End Date", value: "endDate"},
-    {name: "Status", value: "jobStatus"}],
-}
-
-export const QUAY_INITIAL_DATA = {
-  initialState: true,
-  success: 0,
-  failure: 0,
-  total: 0,
-  others: 0,
-  duration:0,
-  ciSystems: ["All"],
-  platforms: ["All"],
-  benchmarks: ["All"],
-  releaseStreams: ["All"],
-  workers: ["All"],
-  hitSizes: ["All"],
-  concurrencies: ["All"],
-  imagePushPulls: ["All"],
-  selectedCiSystem: "All",
-  selectedPlatform: "All",
-  selectedBenchmark: "All",
-  selectedReleaseStream: "All",
-  selectedWorkerCount: "All",
-  selectedHitSize: "All",
-  selectedConcurrency: "All",
-  selectedImagePushPulls: "All",
-  waitForUpdate: false,
-  copyData: [],
-  data: [],
-  updatedTime: 'Loading',
-  error: null,
-  startDate: '',
-  endDate: '',
-  tableData : [{ name: "Benchmark", value: "benchmark" },
-    {name:"Release Stream", value: "releaseStream"},
-    {name:"Platform", value: "platform"},
-    {name: "Worker Count", value: "workerNodesCount"},
-    {name: "Start Date", value: "startDate"},
-    {name: "End Date", value: "endDate"},
-    {name: "Status", value: "jobStatus"}],
-}
-
-export const TELCO_INITIAL_DATA = {
-  initialState: true,
-  success: 0,
-  failure: 0,
-  total: 0,
-  others: 0,
-  duration:0,
-  ciSystems: ["All"],
-  benchmarks: ["All"],
-  versions: ["All"],
-  releaseStreams: ["All"],
-  formals: ["All"],
-  nodeNames: ["All"],
-  cpus: ["All"],
-  selectedCiSystem: "All",
-  selectedBenchmark: "All",
-  selectedVersion: "All",
-  selectedReleaseStream: "All",
-  selectedFormal: "All",
-  selectedCpu: "All",
-  selectedNodeName: "All",
-  waitForUpdate: false,
-  copyData: [],
-  data: [],
-  updatedTime: 'Loading',
-  error: null,
-  startDate: '',
-  endDate: '',
-  tableData : [{ name: "Benchmark", value: "benchmark" },
-    {name:"Release Stream", value: "releaseStream"},
-    {name:"Build", value: "ocpVersion"},
-    {name:"CPU", value: "cpu"},
-    {name:"Node Name", value: "nodeName"},
-    {name: "Start Date", value: "startDate"},
-    {name: "End Date", value: "endDate"},
-    {name: "Status", value: "jobStatus"}],
-}
-
-export const CPT_INITIAL_DATA = {
-  initialState: true,
-  success: 0,
-  failure: 0,
-  total: 0,
-  others: 0,
-  testNames: ["All"],
-  products: ["All"],
-  ciSystems: ["All"],
-  statuses: ["All"],
-  releaseStreams: ["All"],
-  selectedCiSystem: "All",
-  selectedProduct: "All",
-  selectedTestName: "All",
-  selectedJobStatus: "All",
-  selectedReleaseStream: "All",
-  waitForUpdate: false,
-  copyData: [],
-  data: [],
-  updatedTime: 'Loading',
-  error: null,
-  startDate: '',
-  endDate: '',
-  tableData : [{name:"Product", value: "product"},
-    { name: "CI System", value: "ciSystem" },
-    {name: "Test Name", value: "testName"},
-    {name: "Version", value: "version"},
-    {name: "Release Stream", value: "releaseStream"},
-    {name: "Start Date", value: "startDate"},
-    {name: "End Date", value: "endDate"},
-    {name: "Build URL", value: "buildUrl"},
-    {name: "Status", value: "jobStatus"},],
-}
-
-export const GRAPH_INITIAL_DATA = {
-  uuid_results: {},
-  graphError: false,
-}
-
-export const QUAY_GRAPH_INITIAL_DATA = {
-  uuid_results: {},
-  graphError: false,
-}
-
-export const TELCO_GRAPH_INITIAL_DATA = {
-  uuid_results: {},
-  graphError: false,
-}
diff --git a/frontend/src/store/reducers/index.js b/frontend/src/store/reducers/index.js
deleted file mode 100644
index fe4fddad..00000000
--- a/frontend/src/store/reducers/index.js
+++ /dev/null
@@ -1,18 +0,0 @@
-import ocpJobsReducer from "./OCPJobsReducer";
-import cptJobsReducer from "./CPTJobsReducer";
-import quayJobsReducer from "./QuayJobsReducer";
-import telcoJobsReducer from "./TelcoJobsReducer";
-import graphReducer from "./GraphReducer";
-import quayGraphReducer from "./QuayGraphReducer";
-import telcoGraphReducer from "./TelcoGraphReducer";
-
-
-export const rootReducer = {
-  'ocpJobs': ocpJobsReducer,
-  'cptJobs': cptJobsReducer,
-  'quayJobs': quayJobsReducer,
-  'telcoJobs': telcoJobsReducer,
-  'graph': graphReducer,
-  'quayGraph': quayGraphReducer,
-  'telcoGraph': telcoGraphReducer,
-}
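Both deletions point at a store refactor: the per-dashboard initial-data objects and the rootReducer map move out of frontend/src/store/reducers. For reference, a sketch of how a reducer map like the deleted one is typically consumed; the use of Redux Toolkit's configureStore and the new import path are assumptions about this app's store setup, not something shown in this diff:

// Sketch, assuming Redux Toolkit. The deleted index.js exported a plain
// object of slice reducers, which configureStore accepts directly and
// combines internally via combineReducers.
import { configureStore } from "@reduxjs/toolkit";
import { rootReducer } from "./reducers"; // hypothetical relocated module

export const store = configureStore({
  reducer: rootReducer,
});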
"http://0.0.0.0:8000" : `${protocol}//${hostname}`; }; @@ -17,3 +17,6 @@ export const QUAY_GRAPH_API_V1 = "/api/v1/quay/graph"; export const TELCO_JOBS_API_V1 = "/api/v1/telco/jobs"; export const TELCO_GRAPH_API_V1 = "/api/v1/telco/graph"; + +export const ILABS_JOBS_API_V1 = "/api/v1/ilab/runs"; +export const ILAB_GRAPH_API_V1 = "/api/v1/ilab/runs/"; diff --git a/frontend/src/utils/routeConstants.js b/frontend/src/utils/routeConstants.js index 53f271fa..c46bab55 100644 --- a/frontend/src/utils/routeConstants.js +++ b/frontend/src/utils/routeConstants.js @@ -2,3 +2,4 @@ export const HOME = "Home"; export const OCP = "OCP"; export const QUAY = "QUAY"; export const TELCO = "TELCO"; +export const ILAB = "ILAB"; diff --git a/local-compose.sh b/local-compose.sh index 284595a5..43f1b507 100755 --- a/local-compose.sh +++ b/local-compose.sh @@ -13,9 +13,6 @@ podman rm -f front back podman build -f backend/backend.containerfile --tag backend podman build -f frontend/frontend.containerfile --tag frontend -# NOTE: add --network=host to test against a local containerized Horreum podman run -d --name=back -p ${CPT_BACKEND_PORT}:8000 --network=host -v "${CPT_CONFIG}:/backend/ocpperf.toml:Z" localhost/backend podman run -d --name=front --net=host -p ${CPT_FRONTEND_PORT}:3000 localhost/frontend - -