diff --git a/.dockerignore b/.dockerignore
index 8b43ff2..03a268b 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,5 +1,34 @@
-# ignore pycache
-**/__pycache__/
+# Include any files or directories that you don't want to be copied to your
+# container here (e.g., local build artifacts, temporary files, etc.).
+#
+# For more help, visit the .dockerignore file reference guide at
+# https://docs.docker.com/go/build-context-dockerignore/
-# ignore venv folder
-venv/
\ No newline at end of file
+**/.DS_Store
+**/__pycache__
+**/.venv
+**/.classpath
+**/.dockerignore
+**/.env
+**/.git
+**/.gitignore
+**/.project
+**/.settings
+**/.toolstarget
+**/.vs
+**/.vscode
+**/*.*proj.user
+**/*.dbmdl
+**/*.jfm
+**/bin
+**/charts
+**/docker-compose*
+**/compose.y*ml
+**/Dockerfile*
+**/node_modules
+**/npm-debug.log
+**/obj
+**/secrets.dev.yaml
+**/values.dev.yaml
+LICENSE
+README.md
diff --git a/.github/workflows/basic.yml b/.github/workflows/basic.yml
new file mode 100644
index 0000000..325dc02
--- /dev/null
+++ b/.github/workflows/basic.yml
@@ -0,0 +1,28 @@
+name: Basic Tests
+
+on:
+ push:
+ branches: [ main, dev ]
+ pull_request:
+ branches: [ main, dev ]
+ workflow_dispatch:
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v4
+
+ - name: Set up Python
+ run: uv python install 3.10
+
+ - name: Install dependencies
+ run: |
+ uv sync --extra dev
+
+ - name: Run tests
+ run: |
+ uv run pytest tests/unit/ -v --maxfail=5
diff --git a/modules/core/__init__.py b/.github/workflows/test.yml
similarity index 100%
rename from modules/core/__init__.py
rename to .github/workflows/test.yml
diff --git a/.github/workflows/test.yml.backup b/.github/workflows/test.yml.backup
new file mode 100644
index 0000000..7cdb81a
--- /dev/null
+++ b/.github/workflows/test.yml.backup
@@ -0,0 +1,247 @@
+name: Tests
+
+on:
+ push:
+ branches: [ main, dev ]
+ pull_request:
+ branches: [ main, dev ]
+ workflow_dispatch: # Allow manual triggering
+
+jobs:
+ validate:
+ runs-on: ubuntu-latest
+ outputs:
+ should_run: ${{ steps.changes.outputs.should_run }}
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Check if we should run tests
+ id: changes
+ run: |
+ echo "should_run=true" >> $GITHUB_OUTPUT
+ echo "Workflow validation passed"
+
+ - name: Validate pyproject.toml
+ run: |
+ python -c "import tomllib; tomllib.load(open('pyproject.toml', 'rb'))" || \
+ python -c "import tomli; tomli.load(open('pyproject.toml', 'rb'))" || \
+ echo "Could not validate pyproject.toml, but continuing anyway"
+
+ test:
+ needs: validate
+ if: needs.validate.outputs.should_run == 'true'
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ python-version: ["3.10", "3.11", "3.12"]
+ include:
+ # Test with GPU on Ubuntu
+ - os: ubuntu-latest
+ python-version: "3.10"
+ gpu: true
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ version: "latest"
+
+ - name: Set up Python ${{ matrix.python-version }}
+ run: uv python install ${{ matrix.python-version }}
+
+ - name: Install dependencies
+ run: |
+ uv sync --extra dev
+
+ - name: Download test models
+ run: |
+ uv run python tests/models/create_test_models.py
+ continue-on-error: true
+
+ - name: Lint with ruff
+ run: |
+ uv run ruff check .
+
+ - name: Check formatting with black
+ run: |
+ uv run black --check .
+
+ - name: Check import sorting with isort
+ run: |
+ uv run isort --check-only .
+
+ - name: Type check with mypy
+ run: |
+ uv run mypy framework
+ continue-on-error: true
+
+ - name: Run unit tests
+ run: |
+ uv run pytest tests/unit/ --cov=framework --cov-report=xml --junitxml=junit.xml -v
+
+ - name: Run integration tests
+ run: |
+ uv run pytest tests/integration/ --cov=framework --cov-append --cov-report=xml --junitxml=junit-integration.xml -v
+
+ - name: Run GPU tests
+ if: matrix.gpu == true
+ run: |
+ # Only run if CUDA is available
+ uv run python -c "import torch; exit(0 if torch.cuda.is_available() else 1)" && \
+ uv run pytest -m gpu tests/ --cov=framework --cov-append --cov-report=xml -v || \
+ echo "CUDA not available, skipping GPU tests"
+
+ - name: Security scan with bandit
+ run: |
+ uv run bandit -r framework -f json -o bandit-report.json
+ uv run bandit -r framework
+ continue-on-error: true
+
+ - name: Safety check
+ run: |
+ uv run safety check --json --output safety-report.json
+ uv run safety check
+ continue-on-error: true
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v3
+ with:
+ file: ./coverage.xml
+ flags: unittests
+ name: codecov-umbrella
+ fail_ci_if_error: false
+
+ benchmark:
+ needs: validate
+ if: needs.validate.outputs.should_run == 'true'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ version: "latest"
+
+ - name: Set up Python
+ run: uv python install 3.10
+
+ - name: Install dependencies
+ run: |
+ uv sync --extra dev
+
+ - name: Download test models
+ run: |
+ uv run python tests/models/create_test_models.py
+ continue-on-error: true
+
+ - name: Run benchmarks
+ run: |
+ uv run pytest -m benchmark --benchmark-only --benchmark-json=benchmark.json tests/
+ continue-on-error: true
+
+ - name: Store benchmark results
+ uses: benchmark-action/github-action-benchmark@v1
+ if: github.ref == 'refs/heads/main' && hashFiles('benchmark.json') != ''
+ with:
+ tool: 'pytest'
+ output-file-path: benchmark.json
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: true
+ continue-on-error: true
+
+ docs:
+ needs: validate
+ if: needs.validate.outputs.should_run == 'true'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ version: "latest"
+
+ - name: Set up Python
+ run: uv python install 3.10
+
+ - name: Install dependencies
+ run: |
+ uv sync --extra docs
+
+ - name: Check if mkdocs.yml exists
+ run: |
+ if [ -f "mkdocs.yml" ] || [ -f "mkdocs.yaml" ]; then
+ echo "MkDocs configuration found"
+ uv run mkdocs build --strict
+ else
+ echo "No MkDocs configuration found, skipping docs build"
+ fi
+
+ - name: Deploy docs to GitHub Pages
+ if: github.ref == 'refs/heads/main' && (hashFiles('mkdocs.yml') != '' || hashFiles('mkdocs.yaml') != '')
+ run: |
+ uv run mkdocs gh-deploy --force
+
+ docker:
+ needs: validate
+ if: needs.validate.outputs.should_run == 'true'
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Build Docker image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ load: true
+ tags: torch-inference:test
+
+ - name: Test in Docker
+ run: |
+ docker run --rm torch-inference:test python -m pytest tests/unit/ -v
+
+ release:
+ if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+ needs: [validate, test, benchmark, docs, docker]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ version: "latest"
+
+ - name: Set up Python
+ run: uv python install 3.10
+
+ - name: Install dependencies
+ run: |
+ uv sync --extra dev
+ uv add twine
+
+ - name: Build package
+ run: |
+ uv build
+
+ - name: Check package
+ run: |
+ uv run twine check dist/*
+
+ - name: Upload to Test PyPI
+ if: success() && secrets.TEST_PYPI_API_TOKEN != ''
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ password: ${{ secrets.TEST_PYPI_API_TOKEN }}
+ repository_url: https://test.pypi.org/legacy/
+ skip_existing: true
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..2c945fe
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,160 @@
+# Pre-commit configuration for torch-inference framework
+# Runs automated checks before each commit
+
+repos:
+ # Code formatting
+ - repo: https://github.com/psf/black
+ rev: 23.12.1
+ hooks:
+ - id: black
+ language_version: python3
+ args: [--line-length=88]
+
+ # Import sorting
+ - repo: https://github.com/PyCQA/isort
+ rev: 5.13.2
+ hooks:
+ - id: isort
+ args: [--profile, black, --line-length=88]
+
+ # Linting and code quality
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ rev: v0.1.9
+ hooks:
+ - id: ruff
+ args: [--fix, --exit-non-zero-on-fix]
+
+ # Type checking
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.8.0
+ hooks:
+ - id: mypy
+ additional_dependencies: [types-requests, types-PyYAML, types-Pillow]
+ args: [--ignore-missing-imports, --install-types, --non-interactive]
+
+ # Security scanning
+ - repo: https://github.com/PyCQA/bandit
+ rev: 1.7.5
+ hooks:
+ - id: bandit
+ args: [-r, framework, -f, json, -o, bandit-report.json]
+ pass_filenames: false
+
+ # General pre-commit hooks
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ # Basic file checks
+ - id: check-added-large-files
+ args: [--maxkb=1000]
+ - id: check-case-conflict
+ - id: check-executables-have-shebangs
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-toml
+ - id: check-yaml
+ args: [--unsafe]
+ - id: check-json
+ - id: check-xml
+
+ # Python-specific checks
+ - id: check-ast
+ - id: check-builtin-literals
+ - id: check-docstring-first
+ - id: debug-statements
+ - id: name-tests-test
+ args: [--pytest-test-first]
+
+ # File cleanup
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+ args: [--markdown-linebreak-ext=md]
+ - id: mixed-line-ending
+ args: [--fix=lf]
+
+ # Requirements files
+ - id: requirements-txt-fixer
+
+ # Documentation
+ - repo: https://github.com/pycqa/pydocstyle
+ rev: 6.3.0
+ hooks:
+ - id: pydocstyle
+ args: [--convention=google, --add-ignore=D100,D104,D105,D107]
+ exclude: ^(tests/|examples/)
+
+ # Jupyter notebooks (if any)
+ - repo: https://github.com/nbQA-dev/nbQA
+ rev: 1.7.1
+ hooks:
+ - id: nbqa-black
+ - id: nbqa-isort
+ - id: nbqa-ruff
+
+ # YAML formatting
+ - repo: https://github.com/pre-commit/mirrors-prettier
+ rev: v4.0.0-alpha.8
+ hooks:
+ - id: prettier
+ types_or: [yaml, markdown]
+ exclude: ^(.*\.md|CHANGELOG\.md)$
+
+ # Spell checking (optional, can be disabled)
+ - repo: https://github.com/crate-ci/typos
+ rev: v1.16.26
+ hooks:
+ - id: typos
+ exclude: ^(tests/fixtures/|\.git/)
+ args: [--format, brief]
+
+ # Docker
+ - repo: https://github.com/hadolint/hadolint
+ rev: v2.12.0
+ hooks:
+ - id: hadolint-docker
+ args: [--ignore, DL3008, --ignore, DL3009]
+
+ # Local hooks
+ - repo: local
+ hooks:
+ # Custom pytest hook for fast tests
+ - id: pytest-fast
+ name: Run fast tests
+ entry: uv run pytest
+ args: [-m, "not slow and not gpu", --tb=short, -q]
+ language: system
+ types: [python]
+ require_serial: true
+ pass_filenames: false
+ stages: [pre-push]
+
+ # Custom safety check
+ - id: safety-check
+ name: Safety dependency scan
+ entry: uv run safety
+ args: [check, --short-report]
+ language: system
+ pass_filenames: false
+ stages: [pre-push]
+
+ # Custom model validation
+ - id: validate-test-models
+ name: Validate test models
+ entry: uv run python
+ args: [-c, "from tests.models.model_loader import TestModelLoader; TestModelLoader().validate_models()"]
+ language: system
+ pass_filenames: false
+ stages: [pre-push]
+ always_run: true
+
+# Configuration
+default_stages: [commit]
+fail_fast: false
+minimum_pre_commit_version: 3.0.0
+
+# CI configuration
+ci:
+ autoupdate_schedule: monthly
+ autofix_commit_msg: 'style: auto-fix pre-commit hooks'
+ autoupdate_commit_msg: 'chore: update pre-commit hooks'
+ skip: [pytest-fast, safety-check, validate-test-models]
diff --git a/.uvrc b/.uvrc
new file mode 100644
index 0000000..a7131ee
--- /dev/null
+++ b/.uvrc
@@ -0,0 +1,23 @@
+# uv Project Configuration
+# This file contains uv-specific settings and shortcuts
+
+# Environment variables for CUDA
+CUDA_VISIBLE_DEVICES=0
+TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;8.9;9.0"
+
+# uv run shortcuts (add to your shell profile)
+alias torch-run="uv run python"
+alias torch-benchmark="uv run python benchmark.py"
+alias torch-demo="uv run python optimization_demo.py"
+alias torch-test="uv run python test_installation.py"
+
+# Development shortcuts
+alias torch-format="uv run black . && uv run ruff check --fix ."
+alias torch-lint="uv run ruff check ."
+alias torch-type="uv run mypy ."
+alias torch-test-unit="uv run pytest"
+
+# uv environment info
+# Run: uv info
+# Check: uv tree
+# Update: uv sync
diff --git a/Dockerfile b/Dockerfile
index c1cab0b..016445a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,22 +1,51 @@
-# Use an NVIDIA CUDA base image that includes runtime libraries
-FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04
+# syntax=docker/dockerfile:1
-# Install system packages, Python, pip, and git
-RUN apt-get update && apt-get install -y --no-install-recommends \
- python3 \
- python3-pip \
- git \
- && rm -rf /var/lib/apt/lists/*
+# Comments are provided throughout this file to help you get started.
+# If you need more help, visit the Dockerfile reference guide at
+# https://docs.docker.com/go/dockerfile-reference/
-# Upgrade pip and install Jupyter
-RUN pip3 install --upgrade pip && \
- pip3 install notebook jupyterlab
+# Want to help us make this template better? Share your feedback here: https://forms.gle/ybq9Krt8jtBL3iCk7
-# (Optional) If you want data science libraries, add:
-# RUN pip3 install numpy pandas matplotlib scipy scikit-learn
+ARG PYTHON_VERSION=3.10.11
+FROM python:${PYTHON_VERSION}-slim as base
-# Expose port 8888 for Jupyter
-EXPOSE 8888
+# Prevents Python from writing pyc files.
+ENV PYTHONDONTWRITEBYTECODE=1
-# Set the default command to run Jupyter Notebook
-CMD ["jupyter", "notebook", "--ip=0.0.0.0", "--port=8888", "--no-browser", "--allow-root"]
+# Keeps Python from buffering stdout and stderr to avoid situations where
+# the application crashes without emitting any logs due to buffering.
+ENV PYTHONUNBUFFERED=1
+
+WORKDIR /app
+
+# Create a non-privileged user that the app will run under.
+# See https://docs.docker.com/go/dockerfile-user-best-practices/
+ARG UID=10001
+RUN adduser \
+ --disabled-password \
+ --gecos "" \
+ --home "/nonexistent" \
+ --shell "/sbin/nologin" \
+ --no-create-home \
+ --uid "${UID}" \
+ appuser
+
+# Download dependencies as a separate step to take advantage of Docker's caching.
+# Leverage a cache mount to /root/.cache/pip to speed up subsequent builds.
+# Leverage a bind mount to requirements.txt to avoid having to copy them into
+# into this layer.
+RUN --mount=type=cache,target=/root/.cache/pip \
+ --mount=type=bind,source=requirements.txt,target=requirements.txt \
+ python -m pip install -r requirements.txt
+
+# Switch to the non-privileged user to run the application.
+USER appuser
+
+# Copy the source code into the container.
+COPY . .
+
+# Expose the port that the application listens on.
+EXPOSE 8000
+
+# Run the application.
+CMD gunicorn 'venv.Lib.site-packages.fastapi.middleware.wsgi' --bind=0.0.0.0:8000
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..7d45208
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,244 @@
+# Makefile for torch-inference framework testing
+# Provides convenient commands for development and testing
+
+.PHONY: help test test-unit test-integration test-all coverage lint format type-check clean install dev docs security benchmark
+
+# Default target
+.DEFAULT_GOAL := help
+
+help: ## Show this help message
+ @echo "Available commands:"
+ @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[36m%-15s\033[0m %s\n", $$1, $$2}'
+
+# Installation and setup
+install: ## Install dependencies
+ uv sync
+
+install-dev: ## Install development dependencies
+ uv sync --extra dev
+
+install-all: ## Install all dependencies including optional ones
+ uv sync --extra all
+
+# Testing commands
+test: ## Run all tests
+ uv run pytest
+
+test-unit: ## Run unit tests only
+ uv run pytest tests/unit/
+
+test-integration: ## Run integration tests only
+ uv run pytest tests/integration/
+
+test-smoke: ## Run smoke tests for quick validation
+ uv run pytest -m smoke
+
+test-fast: ## Run fast tests only (no slow/gpu tests)
+ uv run pytest -m "not slow and not gpu"
+
+test-slow: ## Run slow tests only
+ uv run pytest -m slow
+
+test-gpu: ## Run GPU tests (requires CUDA)
+ uv run pytest -m gpu
+
+test-tensorrt: ## Run TensorRT tests
+ uv run pytest -m tensorrt
+
+test-onnx: ## Run ONNX tests
+ uv run pytest -m onnx
+
+test-enterprise: ## Run enterprise feature tests
+ uv run pytest -m enterprise
+
+test-parallel: ## Run tests in parallel
+ uv run pytest -n auto
+
+test-verbose: ## Run tests with verbose output
+ uv run pytest -v
+
+test-debug: ## Run tests with debugging info
+ uv run pytest -vvv --tb=long --showlocals
+
+test-failed: ## Re-run only failed tests
+ uv run pytest --lf
+
+test-new: ## Run only new/modified tests
+ uv run pytest --ff
+
+# Coverage and reporting
+coverage: ## Run tests with coverage reporting
+ uv run pytest --cov=framework --cov-report=html --cov-report=term-missing
+
+coverage-xml: ## Generate XML coverage report
+ uv run pytest --cov=framework --cov-report=xml
+
+coverage-html: ## Generate HTML coverage report
+ uv run pytest --cov=framework --cov-report=html
+ @echo "Coverage report available at htmlcov/index.html"
+
+benchmark: ## Run performance benchmarks
+ uv run pytest -m benchmark --benchmark-only --benchmark-sort=mean
+
+# Code quality
+lint: ## Run all linting checks
+ uv run black --check .
+ uv run ruff check .
+ uv run isort --check-only .
+
+lint-fix: ## Fix linting issues
+ uv run black .
+ uv run ruff check --fix .
+ uv run isort .
+
+format: ## Format code
+ uv run black .
+ uv run isort .
+
+type-check: ## Run type checking
+ uv run mypy framework
+
+security: ## Run security scans
+ uv run bandit -r framework
+ uv run safety check
+
+# Documentation
+docs: ## Build documentation
+ uv run mkdocs build
+
+docs-serve: ## Serve documentation locally
+ uv run mkdocs serve
+
+docs-deploy: ## Deploy documentation
+ uv run mkdocs gh-deploy
+
+# Environment management
+clean: ## Clean up generated files
+ rm -rf .pytest_cache/
+ rm -rf htmlcov/
+ rm -rf .coverage
+ rm -rf coverage.xml
+ rm -rf junit.xml
+ rm -rf .tox/
+ rm -rf build/
+ rm -rf dist/
+ rm -rf *.egg-info/
+ find . -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
+ find . -type f -name "*.pyc" -delete
+
+clean-models: ## Clean test models
+ rm -rf tests/models/models/
+ rm -f tests/models/model_registry.json
+
+setup-models: ## Download test models
+ uv run python tests/models/create_test_models.py
+
+# Development helpers
+dev: ## Setup development environment
+ $(MAKE) install-dev
+ $(MAKE) setup-models
+ uv run pre-commit install
+
+dev-test: ## Quick development test run
+ uv run pytest tests/unit/ -x --tb=short
+
+watch: ## Watch for changes and run tests
+ uv run pytest-watch
+
+# CI/CD helpers
+ci-test: ## Run tests for CI (with XML reports)
+ uv run pytest --cov=framework --cov-report=xml --junitxml=junit.xml
+
+ci-lint: ## Run linting for CI
+ uv run black --check .
+ uv run ruff check . --output-format=github
+ uv run isort --check-only .
+
+ci-security: ## Run security checks for CI
+ uv run bandit -r framework -f json -o bandit-report.json
+ uv run safety check --json --output safety-report.json
+
+# Docker helpers
+docker-build: ## Build Docker image for testing
+ docker build -t torch-inference-test .
+
+docker-test: ## Run tests in Docker
+ docker run --rm torch-inference-test make test
+
+# Tox integration
+tox: ## Run tox for multi-environment testing
+ tox
+
+tox-recreate: ## Recreate tox environments
+ tox --recreate
+
+# Advanced testing scenarios
+stress-test: ## Run stress tests
+ uv run pytest tests/ -x --count=10
+
+memory-test: ## Run memory profiling
+ uv run pytest --memray tests/
+
+profile: ## Profile test execution
+ uv run pytest --profile tests/
+
+# Release helpers
+check-release: ## Check if ready for release
+ $(MAKE) lint
+ $(MAKE) type-check
+ $(MAKE) security
+ $(MAKE) test
+ $(MAKE) coverage
+
+# Example usage targets
+example: ## Run basic usage example
+ uv run python examples/basic_usage.py
+
+example-config: ## Run configuration example
+ uv run python examples/config_example.py
+
+example-enterprise: ## Run enterprise example
+ uv run python examples/enterprise_example.py
+
+# Maintenance
+update-deps: ## Update dependencies
+ uv lock --upgrade
+
+check-deps: ## Check for dependency updates
+ uv tree
+
+outdated: ## Check for outdated packages
+ @echo "Use 'uv lock --upgrade' to update dependencies"
+
+# Help for specific test files
+test-config: ## Test configuration module
+ uv run pytest tests/unit/test_config.py -v
+
+test-inference: ## Test inference engine
+ uv run pytest tests/unit/test_inference_engine.py -v
+
+test-optimizers: ## Test optimizers
+ uv run pytest tests/unit/test_optimizers.py -v
+
+test-adapters: ## Test model adapters
+ uv run pytest tests/unit/test_adapters.py -v
+
+test-framework: ## Test main framework
+ uv run pytest tests/unit/test_framework.py -v
+
+# Windows-specific commands (use 'make' equivalent on Windows with compatible tools)
+ifeq ($(OS),Windows_NT)
+ RM = del /Q /F
+ RMDIR = rmdir /Q /S
+else
+ RM = rm -f
+ RMDIR = rm -rf
+endif
+
+# Optional: Print environment info
+info: ## Show environment information
+ @echo "Python version: $(shell python --version)"
+ @echo "UV version: $(shell uv --version)"
+ @echo "Pytest version: $(shell uv run pytest --version)"
+ @echo "PyTorch version: $(shell uv run python -c 'import torch; print(torch.__version__)')"
+ @echo "CUDA available: $(shell uv run python -c 'import torch; print(torch.cuda.is_available())')"
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..e62c43a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,259 @@
+# ๐ PyTorch Inference Framework
+
+> **Production-ready PyTorch inference framework with TensorRT, ONNX, quantization, and advanced acceleration techniques**
+
+[](https://www.python.org/)
+[](https://pytorch.org/)
+[](https://developer.nvidia.com/cuda-toolkit)
+[](https://developer.nvidia.com/tensorrt)
+[](https://github.com/astral-sh/uv)
+
+A comprehensive, production-ready PyTorch inference framework that delivers **2-10x performance improvements** through advanced optimization techniques including TensorRT, ONNX Runtime, quantization, JIT compilation, and CUDA optimizations.
+
+## ๐ Documentation
+
+**Complete documentation is available in the [`docs/`](docs/) directory:**
+
+- **[๐ Documentation Overview](docs/README.md)** - Complete documentation guide
+- **[๐ Quick Start](docs/quickstart.md)** - Get started in minutes
+- **[๐ฆ Installation](docs/installation.md)** - Complete setup instructions
+- **[โ๏ธ Configuration](docs/configuration.md)** - Configuration management
+- **[๐ Examples](docs/examples.md)** - Code examples and tutorials
+- **[๐งช Testing](docs/testing.md)** - Test suite documentation
+
+## ๐ Key Features
+
+### ๐ **Performance Optimizations**
+- **TensorRT Integration**: 2-5x GPU speedup with automatic optimization
+- **ONNX Runtime**: Cross-platform optimization with 1.5-3x performance gains
+- **Dynamic Quantization**: 2-4x memory reduction with minimal accuracy loss
+- **JIT Compilation**: PyTorch native optimization with 20-50% speedup
+- **CUDA Graphs**: Advanced GPU optimization for consistent low latency
+- **Memory Pooling**: 30-50% memory usage reduction
+
+### โก **Production-Ready Features**
+- **Async Processing**: High-throughput async inference with dynamic batching
+- **FastAPI Integration**: Production-ready REST API with automatic documentation
+- **Performance Monitoring**: Real-time metrics and profiling capabilities
+- **Multi-Framework Support**: PyTorch, ONNX, TensorRT, HuggingFace models
+- **Device Auto-Detection**: Automatic GPU/CPU optimization selection
+- **Graceful Fallbacks**: Robust error handling with optimization fallbacks
+
+### ๐ง **Developer Experience**
+- **Modern Package Manager**: Powered by `uv` for 10-100x faster dependency resolution
+- **Comprehensive Documentation**: Detailed guides, examples, and API reference
+- **Type Safety**: Full type annotations with mypy validation
+- **Code Quality**: Black formatting, Ruff linting, pre-commit hooks
+- **Testing Suite**: Comprehensive unit tests with pytest
+- **Docker Support**: Production-ready containerization
+
+## โก Quick Start
+
+### Installation
+```bash
+# Install uv package manager
+pip install uv
+
+# Clone and setup the framework
+git clone https://github.com/Evintkoo/torch-inference.git
+cd torch-inference
+
+# Run automated setup
+uv sync && uv run python test_installation.py
+```
+
+### Basic Usage
+```python
+from framework import create_pytorch_framework
+
+# Initialize framework with automatic optimization
+framework = create_pytorch_framework(
+ model_path="path/to/your/model.pt",
+ device="cuda" if torch.cuda.is_available() else "cpu",
+ enable_optimization=True # Automatic TensorRT/ONNX optimization
+)
+
+# Single prediction
+result = framework.predict(input_data)
+print(f"Prediction: {result}")
+```
+
+### Async High-Performance Processing
+```python
+import asyncio
+from framework import create_async_framework
+
+async def async_example():
+ framework = await create_async_framework(
+ model_path="path/to/your/model.pt",
+ batch_size=16, # Dynamic batching
+ enable_tensorrt=True # TensorRT optimization
+ )
+
+ # Concurrent predictions
+ tasks = [framework.predict_async(data) for data in batch_inputs]
+ results = await asyncio.gather(*tasks)
+
+ await framework.close()
+
+asyncio.run(async_example())
+```
+
+## ๐ฏ Use Cases
+
+- **๐ผ๏ธ Image Classification**: High-performance image inference with CNNs
+- **๐ Text Processing**: NLP models with BERT, GPT, and transformers
+- **๐ Object Detection**: Real-time object detection with YOLO, R-CNN
+- **๐ Production APIs**: REST APIs with FastAPI integration
+- **๐ Batch Processing**: Large-scale batch inference workloads
+- **โก Real-time Systems**: Low-latency real-time inference
+
+## ๐ Performance Benchmarks
+
+| Model Type | Baseline | Optimized | Speedup | Memory Saved |
+|------------|----------|-----------|---------|--------------|
+| **ResNet-50** | 100ms | **20ms** | **5x** | 81% |
+| **BERT-Base** | 50ms | **12ms** | **4.2x** | 75% |
+| **YOLOv8** | 80ms | **18ms** | **4.4x** | 71% |
+
+*See [benchmarks documentation](docs/benchmarks.md) for detailed performance analysis.*
+
+---
+
+## ๐ ๏ธ Optimization Techniques
+
+### 1. TensorRT Optimization (Recommended for NVIDIA GPUs)
+
+```python
+from framework.optimizers import TensorRTOptimizer
+
+# Create TensorRT optimizer
+trt_optimizer = TensorRTOptimizer(
+ precision="fp16", # fp32, fp16, or int8
+ max_batch_size=32, # Maximum batch size
+ workspace_size=1 << 30 # 1GB workspace
+)
+
+# Optimize model
+optimized_model = trt_optimizer.optimize_model(model, example_inputs)
+
+# Benchmark optimization
+benchmark = trt_optimizer.benchmark_optimization(model, optimized_model, inputs)
+print(f"TensorRT speedup: {benchmark['speedup']:.2f}x")
+```
+
+**Expected Results:**
+- 2-5x speedup on modern GPUs (RTX 30/40 series, A100, H100)
+- 50-80% memory reduction with INT8 quantization
+- Best for inference-only workloads
+
+### 2. ONNX Runtime Optimization
+
+```python
+from framework.optimizers import ONNXOptimizer
+
+# Export and optimize with ONNX
+onnx_optimizer = ONNXOptimizer(
+ providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
+ optimization_level='all'
+)
+
+optimized_model = onnx_optimizer.optimize_model(model, example_inputs)
+```
+
+**Expected Results:**
+- 1.5-3x speedup on CPU, 1.2-2x on GPU
+- Better cross-platform compatibility
+- Excellent for edge deployment
+
+### 3. Dynamic Quantization
+
+```python
+from framework.optimizers import QuantizationOptimizer
+
+# Dynamic quantization (easiest setup)
+quantized_model = QuantizationOptimizer.quantize_dynamic(
+ model, dtype=torch.qint8
+)
+
+# Static quantization (better performance)
+quantized_model = QuantizationOptimizer.quantize_static(
+ model, calibration_dataloader
+)
+```
+
+**Expected Results:**
+- 2-4x speedup on CPU
+- 50-75% memory reduction
+- <1% typical accuracy loss
+
+### 4. Complete Optimization Pipeline
+
+```python
+from framework.core.optimized_model import create_optimized_model
+
+# Automatic optimization selection
+config = InferenceConfig()
+config.optimization.auto_optimize = True # Automatic optimization
+config.optimization.benchmark_all = True # Benchmark all methods
+config.optimization.select_best = True # Auto-select best performer
+
+## ๐ณ Docker Deployment
+
+### Quick Setup
+```bash
+# Build and run with GPU support
+docker build -t torch-inference .
+docker run --gpus all -p 8000:8000 torch-inference
+
+# Or use docker compose
+docker compose up --build
+```
+
+See [Deployment Guide](docs/deployment.md) for production deployment.
+
+## ๐งช Testing
+
+```bash
+# Run all tests
+uv run pytest
+
+# Run with coverage
+uv run pytest --cov=framework --cov-report=html
+```
+
+See [Testing Documentation](docs/testing.md) for comprehensive test information.
+
+## ๏ฟฝ More Documentation
+
+- **[๐๏ธ Framework Architecture](docs/framework.md)** - Core framework concepts
+- **[๐ง Optimization Guide](docs/optimization.md)** - Performance optimization
+- **[๐ Deployment Guide](docs/deployment.md)** - Production deployment
+- **[๐ Monitoring Guide](docs/monitoring.md)** - Performance monitoring
+- **[๐ Security Guide](docs/security.md)** - Security features
+- **[๏ฟฝ API Reference](docs/api.md)** - Complete API documentation
+- **[๐จ Troubleshooting](docs/troubleshooting.md)** - Common issues and solutions
+
+## ๐ค Contributing
+
+We welcome contributions! See the [Contributing Guide](docs/contributing.md) for development setup and guidelines.
+
+## ๐ License
+
+This project is licensed under the **MIT License** - see the [LICENSE](LICENSE) file for details.
+
+## ๐ Support
+
+- ๐ **Issues**: [GitHub Issues](https://github.com/Evintkoo/torch-inference/issues)
+- ๐ฌ **Discussions**: [GitHub Discussions](https://github.com/Evintkoo/torch-inference/discussions)
+- ๐ง **Email**: [support@torch-inference.dev](mailto:support@torch-inference.dev)
+
+---
+
+
+
+**โญ Star this repository if it helped you!**
+
+*Built with โค๏ธ for the PyTorch community*
+
+
\ No newline at end of file
diff --git a/benchmark.py b/benchmark.py
deleted file mode 100644
index aa07c82..0000000
--- a/benchmark.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import asyncio
-import time
-import logging
-from dataclasses import dataclass
-from typing import Optional
-
-import torch
-import torchvision.models as models
-
-# Import your engine's configuration and class.
-from modules.core.engine import EngineConfig, InferenceEngine
-
-@dataclass
-class BenchmarkConfig:
- num_inputs: int = 10000 # Larger number of inference requests
- warmup_runs: int = 10
- input_channels: int = 3 # ResNet expects 3-channel images
- input_height: int = 224
- input_width: int = 224
- batch_size: int = 64 # Batch size for synchronous inference
- use_tensorrt: bool = True
- enable_dynamic_batching: bool = True
- profile: bool = True
- async_mode: bool = True
- sync_mode: bool = True
- max_concurrent: int = 256 # Maximum number of concurrent async requests
- log_file: Optional[str] = "benchmark.log"
- debug_mode: bool = True
-
-async def benchmark_async(engine, inputs, max_concurrent: int):
- """
- Benchmark asynchronous inference throughput with concurrency control.
- The semaphore limits the number of concurrently running async tasks.
- """
- semaphore = asyncio.Semaphore(max_concurrent)
- logger = logging.getLogger(__name__)
-
- async def sem_task(x):
- async with semaphore:
- return await engine.run_inference_async(x)
-
- start_time = time.perf_counter()
- tasks = [sem_task(x) for x in inputs]
- await asyncio.gather(*tasks, return_exceptions=False)
- duration = time.perf_counter() - start_time
- throughput = len(inputs) / duration
- seconds_per_pred = duration / len(inputs)
- logger.debug("Asynchronous benchmark completed in %.4f seconds", duration)
- return throughput, seconds_per_pred, duration
-
-def benchmark_sync(engine, inputs):
- """
- Benchmark synchronous batch inference throughput.
- Splits the inputs into batches and processes each batch sequentially.
- """
- logger = logging.getLogger(__name__)
- start_time = time.perf_counter()
-
- # Create batches by stacking inputs according to the engine's batch size
- batches = [
- torch.stack(inputs[i:i+engine.config.batch_size])
- for i in range(0, len(inputs), engine.config.batch_size)
- ]
-
- for batch in batches:
- engine.run_batch_inference(batch)
-
- duration = time.perf_counter() - start_time
- throughput = len(inputs) / duration
- seconds_per_pred = duration / len(inputs)
- logger.debug("Synchronous benchmark completed in %.4f seconds", duration)
- return throughput, seconds_per_pred, duration
-
-async def main(benchmark_config: BenchmarkConfig):
- logger = logging.getLogger(__name__)
- logger.info("Starting benchmark with configuration: %s", benchmark_config)
-
- # Use ResNet-50 (or change to any other model as needed)
- model = models.resnet50(pretrained=True)
- # Optionally modify the final layer for binary classification
- num_ftrs = model.fc.in_features
- model.fc = torch.nn.Linear(num_ftrs, 2)
- model = model.to("cuda")
-
- # Set up the engine configuration
- engine_config = EngineConfig(
- input_shape=[1, benchmark_config.input_channels, benchmark_config.input_height, benchmark_config.input_width],
- batch_size=benchmark_config.batch_size,
- use_tensorrt=benchmark_config.use_tensorrt,
- enable_dynamic_batching=benchmark_config.enable_dynamic_batching,
- log_file=benchmark_config.log_file,
- autoscale_interval=0
- )
-
- engine = InferenceEngine(model=model, config=engine_config)
-
- # Generate test inputs (random images) with shape [C, H, W]
- input_shape = (benchmark_config.input_channels, benchmark_config.input_height, benchmark_config.input_width)
- inputs = [torch.randn(*input_shape, device="cuda") for _ in range(benchmark_config.num_inputs)]
- logger.debug("Generated %d test inputs with shape %s", benchmark_config.num_inputs, input_shape)
-
- # Warmup: Run a few inferences to prepare the model/engine
- warmup_input = inputs[0]
- logger.info("Warming up the model with %d runs", benchmark_config.warmup_runs)
- for _ in range(benchmark_config.warmup_runs):
- await engine.run_inference_async(warmup_input)
-
- results = {}
-
- # Asynchronous Benchmark
- if benchmark_config.async_mode:
- throughput, sec_per_pred, duration = await benchmark_async(engine, inputs, benchmark_config.max_concurrent)
- results["async_throughput"] = throughput
- results["async_sec_per_pred"] = sec_per_pred
- logger.info("=== Asynchronous Inference ===")
- logger.info("Total Duration: %.4f seconds", duration)
- logger.info("Throughput: %.2f predictions/s", throughput)
- logger.info("Seconds per Prediction: %.6f s/pred", sec_per_pred)
-
- # Synchronous Benchmark
- if benchmark_config.sync_mode:
- throughput, sec_per_pred, duration = benchmark_sync(engine, inputs)
- results["sync_throughput"] = throughput
- results["sync_sec_per_pred"] = sec_per_pred
- logger.info("=== Synchronous Inference ===")
- logger.info("Total Duration: %.4f seconds", duration)
- logger.info("Throughput: %.2f predictions/s", throughput)
- logger.info("Seconds per Prediction: %.6f s/pred", sec_per_pred)
-
- # Profiling (if available and enabled)
- if benchmark_config.profile and hasattr(engine, 'profile_inference'):
- profile_input = inputs[0]
- profile_metrics = engine.profile_inference(profile_input)
- results["profile"] = profile_metrics
- logger.info("=== Profile Metrics ===")
- logger.info("Profile Metrics: %s", profile_metrics)
-
- await engine.cleanup()
- logger.info("Engine cleanup completed.")
-
- return results
-
-if __name__ == "__main__":
- # Directly create a benchmark configuration with all options enabled
- benchmark_config = BenchmarkConfig(
- num_inputs=2048*4, # Larger number of test inputs
- warmup_runs=10,
- input_channels=3,
- input_height=224,
- input_width=224,
- batch_size=64,
- use_tensorrt=True,
- enable_dynamic_batching=False,
- profile=True,
- async_mode=True,
- sync_mode=True,
- max_concurrent=256,
- log_file="benchmark.log",
- debug_mode=True
- )
-
- # Configure logging: set level based on debug_mode and add both console and file handlers.
- handlers = [logging.StreamHandler()]
- if benchmark_config.log_file:
- handlers.append(logging.FileHandler(benchmark_config.log_file))
- logging.basicConfig(
- level=logging.DEBUG if benchmark_config.debug_mode else logging.INFO,
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
- handlers=handlers
- )
-
- try:
- asyncio.run(main(benchmark_config))
- except KeyboardInterrupt:
- logging.warning("Benchmark interrupted!")
- exit(1)
diff --git a/compose.yaml b/compose.yaml
new file mode 100644
index 0000000..420ef6a
--- /dev/null
+++ b/compose.yaml
@@ -0,0 +1,49 @@
+# Comments are provided throughout this file to help you get started.
+# If you need more help, visit the Docker Compose reference guide at
+# https://docs.docker.com/go/compose-spec-reference/
+
+# Here the instructions define your application as a service called "server".
+# This service is built from the Dockerfile in the current directory.
+# You can add other services your application may depend on here, such as a
+# database or a cache. For examples, see the Awesome Compose repository:
+# https://github.com/docker/awesome-compose
+services:
+ server:
+ build:
+ context: .
+ ports:
+ - 8000:8000
+
+# The commented out section below is an example of how to define a PostgreSQL
+# database that your application can use. `depends_on` tells Docker Compose to
+# start the database before your application. The `db-data` volume persists the
+# database data between container restarts. The `db-password` secret is used
+# to set the database password. You must create `db/password.txt` and add
+# a password of your choosing to it before running `docker compose up`.
+# depends_on:
+# db:
+# condition: service_healthy
+# db:
+# image: postgres
+# restart: always
+# user: postgres
+# secrets:
+# - db-password
+# volumes:
+# - db-data:/var/lib/postgresql/data
+# environment:
+# - POSTGRES_DB=example
+# - POSTGRES_PASSWORD_FILE=/run/secrets/db-password
+# expose:
+# - 5432
+# healthcheck:
+# test: [ "CMD", "pg_isready" ]
+# interval: 10s
+# timeout: 5s
+# retries: 5
+# volumes:
+# db-data:
+# secrets:
+# db-password:
+# file: db/password.txt
+
diff --git a/config.yaml b/config.yaml
new file mode 100644
index 0000000..a7086e2
--- /dev/null
+++ b/config.yaml
@@ -0,0 +1,191 @@
+# PyTorch Inference Framework Configuration
+# This file contains the main configuration for the inference framework
+
+# Application Configuration
+app:
+ name: "PyTorch Inference Framework"
+ version: "1.0.0"
+ description: "High-performance PyTorch model inference with optimization and monitoring"
+
+# Server Configuration
+server:
+ host: "0.0.0.0"
+ port: 8000
+ reload: false
+ log_level: "INFO"
+ workers: 1
+
+# Model Configuration
+models:
+ default_model: "example"
+ model_path: "models/"
+ supported_formats:
+ - ".pt"
+ - ".pth"
+ - ".onnx"
+ - ".torchscript"
+
+# Device Configuration
+device:
+ type: "auto" # auto, cpu, cuda, mps
+ id: 0 # GPU ID if using CUDA
+ use_fp16: false
+ use_torch_compile: false
+ memory_fraction: 0.8
+
+# Batch Processing Configuration
+batch:
+ batch_size: 4
+ min_batch_size: 1
+ max_batch_size: 16
+ adaptive_batching: true
+ timeout_seconds: 5.0
+ queue_size: 100
+
+# Preprocessing Configuration
+preprocessing:
+ input_size:
+ width: 224
+ height: 224
+ normalization:
+ mean: [0.485, 0.456, 0.406]
+ std: [0.229, 0.224, 0.225]
+ interpolation: "bilinear"
+ center_crop: true
+ normalize: true
+ to_rgb: true
+
+# Postprocessing Configuration
+postprocessing:
+ threshold: 0.5
+ nms_threshold: 0.5
+ max_detections: 100
+ apply_sigmoid: false
+ apply_softmax: false
+
+# Performance Configuration
+performance:
+ enable_profiling: false
+ enable_metrics: true
+ warmup_iterations: 3
+ benchmark_iterations: 10
+ enable_async: true
+ max_workers: 4
+
+# Caching Configuration
+cache:
+ enable_caching: true
+ cache_size: 100
+ cache_ttl_seconds: 3600
+ disk_cache_path: null
+
+# Security Configuration
+security:
+ max_file_size_mb: 100
+ allowed_extensions:
+ - ".jpg"
+ - ".jpeg"
+ - ".png"
+ - ".bmp"
+ - ".tiff"
+ - ".webp"
+ validate_inputs: true
+ sanitize_outputs: true
+
+# Optimization Configuration
+optimization:
+ enable_jit: true
+ enable_quantization: false
+ quantization_mode: "dynamic" # dynamic, static, qat
+ enable_tensorrt: false
+ tensorrt_precision: "fp32" # fp32, fp16, int8
+ enable_onnx: false
+ onnx_providers:
+ - "CPUExecutionProvider"
+
+# Monitoring Configuration
+monitoring:
+ enable_metrics: true
+ enable_logging: true
+ enable_tracing: false
+ metrics:
+ port: 9090
+ path: "/metrics"
+ retention_days: 30
+ logging:
+ level: "INFO"
+ format: "json"
+ retention_days: 30
+ tracing:
+ service_name: "torch-inference"
+ sampling_rate: 0.1
+ jaeger_endpoint: ""
+ zipkin_endpoint: ""
+
+# Enterprise Configuration (optional)
+enterprise:
+ enabled: false
+ auth:
+ provider: "jwt" # jwt, oauth2, saml, ldap
+ secret_key: ""
+ algorithm: "HS256"
+ access_token_expire_minutes: 30
+ enable_mfa: false
+ enable_api_keys: true
+ rbac:
+ enable_rbac: false
+ default_role: "user"
+ admin_users: []
+ security:
+ enable_encryption_at_rest: false
+ enable_rate_limiting: true
+ rate_limit_requests_per_minute: 100
+ enable_audit_logging: false
+ integration:
+ database_url: ""
+ cache_url: "redis://localhost:6379/0"
+ message_broker_url: ""
+ scaling:
+ enable_auto_scaling: false
+ min_replicas: 1
+ max_replicas: 10
+ cpu_target_utilization: 70
+
+# Environment-specific overrides
+environments:
+ development:
+ server:
+ reload: true
+ log_level: "DEBUG"
+ security:
+ validate_inputs: false
+ performance:
+ enable_profiling: true
+ enterprise:
+ security:
+ enable_rate_limiting: false
+
+ staging:
+ security:
+ validate_inputs: true
+ monitoring:
+ enable_tracing: true
+ enterprise:
+ security:
+ rate_limit_requests_per_minute: 500
+
+ production:
+ server:
+ workers: 4
+ security:
+ validate_inputs: true
+ sanitize_outputs: true
+ enterprise:
+ enabled: true
+ auth:
+ enable_mfa: true
+ security:
+ enable_encryption_at_rest: true
+ enable_audit_logging: true
+ scaling:
+ enable_auto_scaling: true
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000..7bb8b74
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,135 @@
+# ๐ PyTorch Inference Framework Documentation
+
+Welcome to the comprehensive documentation for the PyTorch Inference Framework - a production-ready inference solution with advanced optimization capabilities.
+
+## ๐ Quick Navigation
+
+### ๐ Getting Started
+- **[Installation Guide](installation.md)** - Complete setup instructions
+- **[Quick Start](quickstart.md)** - Basic usage examples
+- **[Configuration](configuration.md)** - Configuration management
+
+### ๐ง Core Features
+- **[Framework Overview](framework.md)** - Architecture and components
+- **[Model Management](models.md)** - Loading and managing models
+- **[Inference Engine](inference.md)** - Sync and async inference
+- **[Optimization](optimization.md)** - TensorRT, ONNX, quantization
+
+### ๐ญ Production Use
+- **[Deployment](deployment.md)** - Docker and production deployment
+- **[Monitoring](monitoring.md)** - Performance monitoring and metrics
+- **[Security](security.md)** - Security features and best practices
+
+### ๐งช Development
+- **[API Reference](api.md)** - Complete API documentation
+- **[Testing](testing.md)** - Test suite and guidelines
+- **[Examples](examples.md)** - Code examples and tutorials
+- **[Contributing](contributing.md)** - Development guidelines
+
+### ๐ Performance
+- **[Benchmarks](benchmarks.md)** - Performance comparisons
+- **[Optimization Guide](optimization-guide.md)** - Advanced performance tuning
+- **[Troubleshooting](troubleshooting.md)** - Common issues and solutions
+
+## ๐ Key Features
+
+### โก Performance Optimizations
+- **2-10x speedup** with TensorRT, ONNX Runtime, and quantization
+- **CUDA graphs** for consistent low latency
+- **Memory pooling** for 30-50% memory reduction
+- **JIT compilation** with 20-50% performance gains
+
+### ๐ Production Ready
+- **Async processing** with dynamic batching
+- **FastAPI integration** with automatic documentation
+- **Real-time monitoring** and profiling
+- **Multi-framework support** (PyTorch, ONNX, TensorRT)
+
+### ๐ง Developer Experience
+- **Modern package management** with `uv`
+- **Type safety** with full annotations
+- **Comprehensive testing** with pytest
+- **Docker support** for easy deployment
+
+## ๐ Documentation Structure
+
+```
+docs/
+โโโ README.md # This overview
+โโโ installation.md # Setup and installation
+โโโ quickstart.md # Getting started guide
+โโโ configuration.md # Configuration management
+โโโ framework.md # Core framework concepts
+โโโ models.md # Model management
+โโโ inference.md # Inference capabilities
+โโโ optimization.md # Performance optimization
+โโโ deployment.md # Production deployment
+โโโ monitoring.md # Monitoring and metrics
+โโโ security.md # Security features
+โโโ api.md # API reference
+โโโ testing.md # Testing documentation
+โโโ examples.md # Examples and tutorials
+โโโ benchmarks.md # Performance benchmarks
+โโโ optimization-guide.md # Advanced optimization
+โโโ troubleshooting.md # Common issues
+โโโ contributing.md # Development guidelines
+```
+
+## ๐ฏ Use Cases
+
+### ๐ผ๏ธ Image Classification
+```python
+from framework import create_pytorch_framework
+
+framework = create_pytorch_framework(
+ model_path="models/resnet50.pt",
+ enable_optimization=True
+)
+
+result = framework.predict(image_tensor)
+```
+
+### ๐ Text Processing
+```python
+from framework import create_async_framework
+
+framework = await create_async_framework(
+ model_path="models/bert.pt",
+ batch_size=16
+)
+
+results = await framework.predict_batch(text_samples)
+```
+
+### ๐ High-Performance API
+```python
+from fastapi import FastAPI
+from framework import create_optimized_framework
+
+framework = create_optimized_framework(
+ optimization_level="aggressive"
+)
+
+app = FastAPI()
+
+@app.post("/predict")
+async def predict(data: InputData):
+ return await framework.predict_async(data)
+```
+
+## ๐ External Resources
+
+- **[GitHub Repository](https://github.com/Evintkoo/torch-inference)**
+- **[PyPI Package](https://pypi.org/project/torch-inference/)**
+- **[Docker Images](https://hub.docker.com/r/evintkoo/torch-inference)**
+- **[Documentation Site](https://evintkoo.github.io/torch-inference/)**
+
+## ๐ Support
+
+- ๐ **Issues**: [GitHub Issues](https://github.com/Evintkoo/torch-inference/issues)
+- ๐ฌ **Discussions**: [GitHub Discussions](https://github.com/Evintkoo/torch-inference/discussions)
+- ๐ง **Email**: [support@torch-inference.dev](mailto:support@torch-inference.dev)
+
+---
+
+*Built with โค๏ธ for the PyTorch community*
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 0000000..f182b69
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,462 @@
+# โ๏ธ Configuration Management
+
+This guide covers the comprehensive configuration system for the PyTorch Inference Framework, supporting multiple configuration sources with clear precedence rules.
+
+## ๐ง Configuration Overview
+
+The framework supports multiple configuration sources:
+
+1. **Environment Variables** (.env file) - Highest priority
+2. **YAML Configuration** (config.yaml) - Environment-specific overrides
+3. **Default Values** - Built-in fallbacks
+
+## ๐ Configuration Files
+
+### Environment Variables (.env)
+Primary source for environment-specific settings:
+
+```bash
+# Server Configuration
+HOST=0.0.0.0
+PORT=8000
+LOG_LEVEL=INFO
+
+# Device Configuration
+DEVICE=cuda
+USE_FP16=true
+
+# Batch Configuration
+BATCH_SIZE=8
+MAX_BATCH_SIZE=32
+
+# Performance Configuration
+WARMUP_ITERATIONS=10
+ENABLE_TENSORRT=true
+
+# Security Configuration
+MAX_FILE_SIZE_MB=100
+VALIDATE_INPUTS=true
+```
+
+### YAML Configuration (config.yaml)
+Structured configuration with environment overrides:
+
+```yaml
+# Base configuration
+server:
+ host: "0.0.0.0"
+ port: 8000
+ log_level: "INFO"
+ workers: 1
+
+device:
+ type: "auto"
+ use_fp16: false
+ memory_fraction: 0.8
+
+batch:
+ batch_size: 4
+ max_batch_size: 16
+ adaptive_batching: true
+
+optimization:
+ enable_tensorrt: false
+ enable_quantization: false
+ enable_jit: true
+
+# Environment-specific overrides
+environments:
+ development:
+ server:
+ reload: true
+ log_level: "DEBUG"
+ device:
+ type: "cpu"
+ optimization:
+ enable_profiling: true
+
+ staging:
+ server:
+ workers: 2
+ device:
+ use_fp16: true
+ batch:
+ batch_size: 8
+
+ production:
+ server:
+ workers: 4
+ log_level: "WARNING"
+ device:
+ use_fp16: true
+ memory_fraction: 0.9
+ batch:
+ batch_size: 16
+ max_batch_size: 64
+ optimization:
+ enable_tensorrt: true
+ enable_quantization: true
+```
+
+## ๐๏ธ Configuration Architecture
+
+### Configuration Precedence
+
+Values are resolved in the following order (highest to lowest priority):
+
+1. **Environment Variables** (from `.env` file or system)
+2. **YAML Environment Overrides** (environment-specific section)
+3. **YAML Base Configuration** (main configuration)
+4. **Default Values** (hardcoded fallbacks)
+
+### Using Configuration Manager
+
+```python
+from framework.core.config_manager import get_config_manager
+
+# Get the global configuration manager
+config_manager = get_config_manager()
+
+# Get typed configuration objects
+server_config = config_manager.get_server_config()
+inference_config = config_manager.get_inference_config()
+
+# Get individual values with fallbacks
+batch_size = config_manager.get(
+ 'BATCH_SIZE',
+ default=4,
+ config_path='batch.batch_size'
+)
+
+# Environment-specific configuration
+prod_config = ConfigManager(environment='production')
+```
+
+## ๐ง Configuration Categories
+
+### Server Configuration
+
+| Environment Variable | YAML Path | Default | Description |
+|---------------------|-----------|---------|-------------|
+| `HOST` | `server.host` | `"0.0.0.0"` | Server host address |
+| `PORT` | `server.port` | `8000` | Server port number |
+| `LOG_LEVEL` | `server.log_level` | `"INFO"` | Logging level |
+| `RELOAD` | `server.reload` | `false` | Enable auto-reload |
+| `WORKERS` | `server.workers` | `1` | Number of workers |
+
+### Device Configuration
+
+| Environment Variable | YAML Path | Default | Description |
+|---------------------|-----------|---------|-------------|
+| `DEVICE` | `device.type` | `"auto"` | Device type (auto/cpu/cuda/mps) |
+| `DEVICE_ID` | `device.id` | `0` | GPU device ID |
+| `USE_FP16` | `device.use_fp16` | `false` | Enable half precision |
+| `MEMORY_FRACTION` | `device.memory_fraction` | `0.8` | GPU memory fraction |
+| `USE_TORCH_COMPILE` | `device.use_torch_compile` | `false` | Enable torch.compile |
+
+### Batch Configuration
+
+| Environment Variable | YAML Path | Default | Description |
+|---------------------|-----------|---------|-------------|
+| `BATCH_SIZE` | `batch.batch_size` | `4` | Default batch size |
+| `MIN_BATCH_SIZE` | `batch.min_batch_size` | `1` | Minimum batch size |
+| `MAX_BATCH_SIZE` | `batch.max_batch_size` | `16` | Maximum batch size |
+| `ADAPTIVE_BATCHING` | `batch.adaptive_batching` | `false` | Enable adaptive batching |
+| `BATCH_TIMEOUT` | `batch.timeout_seconds` | `0.1` | Batch timeout |
+
+### Optimization Configuration
+
+| Environment Variable | YAML Path | Default | Description |
+|---------------------|-----------|---------|-------------|
+| `ENABLE_TENSORRT` | `optimization.enable_tensorrt` | `false` | Enable TensorRT |
+| `ENABLE_QUANTIZATION` | `optimization.enable_quantization` | `false` | Enable quantization |
+| `ENABLE_JIT` | `optimization.enable_jit` | `true` | Enable JIT compilation |
+| `ENABLE_ONNX` | `optimization.enable_onnx` | `false` | Enable ONNX Runtime |
+| `OPTIMIZATION_LEVEL` | `optimization.level` | `"balanced"` | Optimization level |
+
+### Performance Configuration
+
+| Environment Variable | YAML Path | Default | Description |
+|---------------------|-----------|---------|-------------|
+| `WARMUP_ITERATIONS` | `performance.warmup_iterations` | `5` | Model warmup iterations |
+| `MAX_WORKERS` | `performance.max_workers` | `4` | Maximum worker threads |
+| `ENABLE_PROFILING` | `performance.enable_profiling` | `false` | Enable profiling |
+| `ENABLE_METRICS` | `performance.enable_metrics` | `true` | Enable metrics collection |
+
+### Security Configuration
+
+| Environment Variable | YAML Path | Default | Description |
+|---------------------|-----------|---------|-------------|
+| `MAX_FILE_SIZE_MB` | `security.max_file_size_mb` | `50` | Maximum file size |
+| `ALLOWED_EXTENSIONS` | `security.allowed_extensions` | `[".jpg",".png"]` | Allowed file extensions |
+| `VALIDATE_INPUTS` | `security.validate_inputs` | `true` | Enable input validation |
+| `SANITIZE_OUTPUTS` | `security.sanitize_outputs` | `true` | Enable output sanitization |
+
+## ๐ข Enterprise Configuration
+
+Enable enterprise features:
+
+```bash
+ENTERPRISE_ENABLED=true
+```
+
+### Authentication Configuration
+
+```yaml
+enterprise:
+ auth:
+ enabled: true
+ secret_key: "${JWT_SECRET_KEY}"
+ algorithm: "HS256"
+ expire_minutes: 30
+ oauth2:
+ client_id: "${OAUTH2_CLIENT_ID}"
+ client_secret: "${OAUTH2_CLIENT_SECRET}"
+```
+
+### Security Configuration
+
+```yaml
+enterprise:
+ security:
+ enable_encryption_at_rest: true
+ rate_limit_requests_per_minute: 100
+ enable_audit_logging: true
+ allowed_ips: ["192.168.1.0/24"]
+```
+
+### Integration Configuration
+
+```yaml
+enterprise:
+ integration:
+ database:
+ url: "${DATABASE_URL}"
+ pool_size: 10
+ cache:
+ url: "${CACHE_URL}"
+ ttl_seconds: 3600
+ message_broker:
+ url: "${MESSAGE_BROKER_URL}"
+ exchange: "inference"
+```
+
+## ๐ Environment-Specific Configurations
+
+### Development Environment
+```bash
+ENVIRONMENT=development
+HOST=127.0.0.1
+PORT=8000
+LOG_LEVEL=DEBUG
+RELOAD=true
+DEVICE=cpu
+ENABLE_PROFILING=true
+VALIDATE_INPUTS=false # Relaxed validation
+```
+
+### Staging Environment
+```bash
+ENVIRONMENT=staging
+HOST=0.0.0.0
+PORT=8000
+LOG_LEVEL=INFO
+DEVICE=cuda
+USE_FP16=true
+BATCH_SIZE=8
+MAX_BATCH_SIZE=16
+```
+
+### Production Environment
+```bash
+ENVIRONMENT=production
+HOST=0.0.0.0
+PORT=8000
+LOG_LEVEL=WARNING
+WORKERS=4
+DEVICE=cuda
+USE_FP16=true
+BATCH_SIZE=16
+MAX_BATCH_SIZE=64
+ENTERPRISE_ENABLED=true
+ENABLE_TENSORRT=true
+ENABLE_QUANTIZATION=true
+```
+
+## ๐ Configuration Examples
+
+### Basic Configuration Usage
+
+```python
+from framework.core.config_manager import get_config_manager
+from framework import TorchInferenceFramework
+
+# Initialize with configuration
+config_manager = get_config_manager()
+inference_config = config_manager.get_inference_config()
+
+# Create framework with configuration
+framework = TorchInferenceFramework(config=inference_config)
+framework.load_model("path/to/model.pt")
+
+# Use configuration values
+batch_size = config_manager.get('BATCH_SIZE', default=4)
+results = framework.predict_batch(inputs, batch_size=batch_size)
+```
+
+### Dynamic Configuration
+
+```python
+from framework.core.config_manager import ConfigManager
+
+# Create configuration for specific environment
+prod_manager = ConfigManager(environment='production')
+dev_manager = ConfigManager(environment='development')
+
+# Compare configurations
+prod_config = prod_manager.get_inference_config()
+dev_config = dev_manager.get_inference_config()
+
+print(f"Production batch size: {prod_config.batch.batch_size}")
+print(f"Development batch size: {dev_config.batch.batch_size}")
+```
+
+### Configuration Validation
+
+```python
+from framework.core.config_manager import get_config_manager
+
+config_manager = get_config_manager()
+
+# Validate configuration
+if config_manager.validate_configuration():
+ print("โ
Configuration is valid")
+else:
+ print("โ Configuration has errors")
+ for error in config_manager.get_validation_errors():
+ print(f" - {error}")
+```
+
+## ๐ Configuration Debugging
+
+### View Current Configuration
+
+```python
+from framework.core.config_manager import get_config_manager
+
+config_manager = get_config_manager()
+
+# Print all configuration values
+config_manager.print_configuration()
+
+# Get configuration as dictionary
+config_dict = config_manager.to_dict()
+print(json.dumps(config_dict, indent=2))
+
+# Check configuration sources
+sources = config_manager.get_configuration_sources()
+for key, source in sources.items():
+ print(f"{key}: {source}")
+```
+
+### API Endpoints
+
+The framework provides configuration inspection endpoints:
+
+- `GET /config` - View current configuration
+- `GET /config/sources` - View configuration sources
+- `GET /config/validate` - Validate configuration
+
+### Environment Information
+
+```bash
+# View current environment
+curl http://localhost:8000/
+
+# View configuration
+curl http://localhost:8000/config
+
+# Validate configuration
+curl http://localhost:8000/config/validate
+```
+
+## ๐ Configuration Testing
+
+### Test Configuration Example
+
+```python
+# examples/config_example.py
+from framework.core.config_manager import ConfigManager
+
+def test_configuration_environments():
+ """Test different environment configurations"""
+
+ environments = ['development', 'staging', 'production']
+
+ for env in environments:
+ print(f"\n=== {env.upper()} ENVIRONMENT ===")
+
+ config_manager = ConfigManager(environment=env)
+
+ # Server configuration
+ server_config = config_manager.get_server_config()
+ print(f"Host: {server_config.host}")
+ print(f"Port: {server_config.port}")
+ print(f"Workers: {server_config.workers}")
+ print(f"Log Level: {server_config.log_level}")
+
+ # Device configuration
+ device_config = config_manager.get_inference_config().device
+ print(f"Device: {device_config.device_type}")
+ print(f"FP16: {device_config.use_fp16}")
+
+ # Batch configuration
+ batch_config = config_manager.get_inference_config().batch
+ print(f"Batch Size: {batch_config.batch_size}")
+ print(f"Max Batch Size: {batch_config.max_batch_size}")
+
+if __name__ == "__main__":
+ test_configuration_environments()
+```
+
+Run the test:
+```bash
+uv run python examples/config_example.py
+```
+
+## ๐จ Configuration Best Practices
+
+### Security
+1. **Store secrets in .env**: Never commit sensitive values
+2. **Use environment-specific configs**: Separate dev/staging/prod
+3. **Validate inputs**: Always validate configuration values
+4. **Audit configuration**: Log configuration changes
+
+### Performance
+1. **Cache configuration**: Avoid repeated parsing
+2. **Use appropriate defaults**: Set sensible fallback values
+3. **Profile configuration impact**: Monitor performance effects
+4. **Optimize for your use case**: Tune batch sizes and workers
+
+### Maintainability
+1. **Document configuration**: Explain all options
+2. **Use type hints**: Ensure type safety
+3. **Version configuration**: Track configuration changes
+4. **Test configuration**: Validate all environments
+
+### Deployment
+1. **Environment parity**: Keep environments consistent
+2. **Configuration management**: Use proper config management tools
+3. **Monitoring**: Track configuration-related issues
+4. **Rollback capability**: Plan for configuration rollbacks
+
+## ๐ Related Documentation
+
+- **[Installation Guide](installation.md)** - Setting up configuration files
+- **[Deployment Guide](deployment.md)** - Production configuration
+- **[Security Guide](security.md)** - Secure configuration practices
+- **[API Reference](api.md)** - Configuration API documentation
+
+---
+
+*Need help with configuration? Check the [Troubleshooting Guide](troubleshooting.md) or [open an issue](https://github.com/Evintkoo/torch-inference/issues).*
diff --git a/docs/examples.md b/docs/examples.md
new file mode 100644
index 0000000..77046bf
--- /dev/null
+++ b/docs/examples.md
@@ -0,0 +1,1166 @@
+# ๐ Examples and Tutorials
+
+This guide provides comprehensive examples for using the PyTorch Inference Framework across different scenarios and use cases.
+
+## ๐ Example Structure
+
+The `examples/` directory contains:
+
+```
+examples/
+โโโ README.md # This guide
+โโโ basic_usage.py # Simple synchronous inference
+โโโ async_processing.py # High-throughput async inference
+โโโ fastapi_server.py # Production REST API
+โโโ custom_models.py # Integrating custom models
+โโโ tensorrt_optimization.py # TensorRT optimization
+โโโ onnx_optimization.py # ONNX Runtime optimization
+โโโ quantization_examples.py # Model quantization
+โโโ performance_tuning.py # Advanced performance optimization
+โโโ docker_deployment.py # Docker containerization
+โโโ monitoring_setup.py # Production monitoring
+โโโ config_example.py # Configuration management
+โโโ config_modification_examples.py # Dynamic configuration
+โโโ download_test_models.py # Download models for testing
+```
+
+## ๐ Basic Examples
+
+### 1. Simple Inference (`basic_usage.py`)
+
+```python
+#!/usr/bin/env python3
+"""
+Basic PyTorch Inference Framework Usage Example
+
+This example demonstrates simple synchronous inference patterns
+using the framework with different model types.
+"""
+
+import torch
+import numpy as np
+from pathlib import Path
+import logging
+
+from framework import create_pytorch_framework, TorchInferenceFramework
+from framework.core.config import InferenceConfig, DeviceConfig, BatchConfig
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def create_sample_model(model_type="linear"):
+ """Create sample models for demonstration"""
+
+ if model_type == "linear":
+ # Simple linear classifier
+ model = torch.nn.Sequential(
+ torch.nn.Linear(784, 128),
+ torch.nn.ReLU(),
+ torch.nn.Linear(128, 10)
+ )
+ elif model_type == "cnn":
+ # Simple CNN for image classification
+ model = torch.nn.Sequential(
+ torch.nn.Conv2d(3, 16, 3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.AdaptiveAvgPool2d((1, 1)),
+ torch.nn.Flatten(),
+ torch.nn.Linear(16, 10)
+ )
+ elif model_type == "complex":
+ # More complex model
+ model = torch.nn.Sequential(
+ torch.nn.Conv2d(3, 32, 3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.Conv2d(32, 64, 3, padding=1),
+ torch.nn.ReLU(),
+ torch.nn.AdaptiveAvgPool2d((4, 4)),
+ torch.nn.Flatten(),
+ torch.nn.Linear(64 * 16, 128),
+ torch.nn.ReLU(),
+ torch.nn.Linear(128, 10)
+ )
+ else:
+ raise ValueError(f"Unknown model type: {model_type}")
+
+ # Initialize weights
+ for module in model.modules():
+ if isinstance(module, (torch.nn.Linear, torch.nn.Conv2d)):
+ torch.nn.init.kaiming_normal_(module.weight)
+ if module.bias is not None:
+ torch.nn.init.zeros_(module.bias)
+
+ model.eval()
+ return model
+
+def example_1_quick_start():
+ """Example 1: Quick start with minimal setup"""
+ print("\n=== Example 1: Quick Start ===")
+
+ # Create and save a simple model
+ model = create_sample_model("linear")
+ model_path = "models/simple_linear.pt"
+ Path("models").mkdir(exist_ok=True)
+ torch.save(model.state_dict(), model_path)
+
+ # Initialize framework with minimal configuration
+ framework = create_pytorch_framework(
+ model_path=model_path,
+ device="cpu" # Use CPU for compatibility
+ )
+
+ # Create sample input (batch of 5 samples)
+ input_data = torch.randn(5, 784)
+
+ # Run inference
+ result = framework.predict(input_data)
+
+ print(f"Input shape: {input_data.shape}")
+ print(f"Output shape: {result.shape}")
+ print(f"Prediction (first sample): {result[0]}")
+ print(f"Predicted classes: {torch.argmax(result, dim=1)}")
+
+ return framework
+
+def example_2_custom_configuration():
+ """Example 2: Custom configuration"""
+ print("\n=== Example 2: Custom Configuration ===")
+
+ # Create more complex model
+ model = create_sample_model("cnn")
+ model_path = "models/simple_cnn.pt"
+ torch.save(model.state_dict(), model_path)
+
+ # Custom configuration
+ config = InferenceConfig(
+ model_path=model_path,
+ device=DeviceConfig(
+ device_type="cpu",
+ use_fp16=False # FP16 not supported on CPU
+ ),
+ batch=BatchConfig(
+ batch_size=8,
+ max_batch_size=16
+ )
+ )
+
+ # Initialize framework with configuration
+ framework = TorchInferenceFramework(config=config)
+ framework.initialize()
+
+ # Create sample image input
+ input_data = torch.randn(3, 3, 32, 32) # 3 RGB images, 32x32
+
+ # Run inference
+ result = framework.predict(input_data)
+
+ print(f"Input shape: {input_data.shape}")
+ print(f"Output shape: {result.shape}")
+ print(f"Configuration used:")
+ print(f" Device: {config.device.device_type}")
+ print(f" Batch size: {config.batch.batch_size}")
+
+ framework.cleanup()
+ return result
+
+def example_3_batch_processing():
+ """Example 3: Batch processing"""
+ print("\n=== Example 3: Batch Processing ===")
+
+ # Use existing model
+ model_path = "models/simple_linear.pt"
+
+ framework = create_pytorch_framework(
+ model_path=model_path,
+ device="cpu",
+ batch_size=16 # Process in batches of 16
+ )
+
+ # Create larger dataset
+ num_samples = 100
+ all_inputs = [torch.randn(1, 784) for _ in range(num_samples)]
+
+ # Process as batch
+ print(f"Processing {num_samples} samples in batches...")
+ all_results = framework.predict_batch(all_inputs)
+
+ print(f"Processed {len(all_results)} predictions")
+ print(f"First prediction shape: {all_results[0].shape}")
+
+ # Calculate accuracy on dummy labels
+ dummy_labels = torch.randint(0, 10, (num_samples,))
+ predicted_classes = torch.cat([torch.argmax(r, dim=1) for r in all_results])
+ accuracy = (predicted_classes == dummy_labels).float().mean()
+ print(f"Dummy accuracy: {accuracy:.2%}")
+
+ return all_results
+
+def example_4_performance_monitoring():
+ """Example 4: Performance monitoring"""
+ print("\n=== Example 4: Performance Monitoring ===")
+
+ from framework import create_monitored_framework
+ import time
+
+ # Create framework with monitoring
+ framework = create_monitored_framework(
+ model_path="models/simple_linear.pt",
+ enable_detailed_metrics=True
+ )
+
+ # Run multiple predictions for statistics
+ input_data = torch.randn(1, 784)
+
+ print("Running performance test...")
+ for i in range(20):
+ result = framework.predict(input_data)
+ if i % 5 == 0:
+ print(f"Completed {i+1}/20 predictions")
+
+ # Get performance metrics
+ metrics = framework.get_metrics()
+ print(f"\nPerformance Metrics:")
+ print(f" Average latency: {metrics.get('latency', {}).get('avg_ms', 0):.2f}ms")
+ print(f" Total predictions: {metrics.get('predictions', {}).get('count', 0)}")
+ print(f" Throughput: {metrics.get('throughput', {}).get('requests_per_second', 0):.1f} req/s")
+
+ return metrics
+
+def example_5_error_handling():
+ """Example 5: Error handling and validation"""
+ print("\n=== Example 5: Error Handling ===")
+
+ framework = create_pytorch_framework(
+ model_path="models/simple_linear.pt",
+ device="cpu"
+ )
+
+ # Test with correct input
+ correct_input = torch.randn(2, 784)
+ try:
+ result = framework.predict(correct_input)
+ print(f"โ
Correct input processed: {result.shape}")
+ except Exception as e:
+ print(f"โ Unexpected error: {e}")
+
+ # Test with incorrect input shape
+ wrong_input = torch.randn(2, 100) # Wrong feature size
+ try:
+ result = framework.predict(wrong_input)
+ print(f"โ ๏ธ Wrong input somehow worked: {result.shape}")
+ except Exception as e:
+ print(f"โ
Correctly caught error: {type(e).__name__}: {e}")
+
+ # Test with invalid input type
+ try:
+ result = framework.predict("invalid_input")
+ print(f"โ ๏ธ String input somehow worked")
+ except Exception as e:
+ print(f"โ
Correctly caught type error: {type(e).__name__}")
+
+ # Test framework health
+ health = framework.get_health_status()
+ print(f"\nFramework health: {health.get('status', 'unknown')}")
+
+def main():
+ """Run all basic examples"""
+ print("๐ PyTorch Inference Framework - Basic Examples")
+ print("=" * 50)
+
+ try:
+ # Run examples in sequence
+ example_1_quick_start()
+ example_2_custom_configuration()
+ example_3_batch_processing()
+ example_4_performance_monitoring()
+ example_5_error_handling()
+
+ print("\n๐ All examples completed successfully!")
+ print("\nNext steps:")
+ print(" - Try async_processing.py for high-performance async inference")
+ print(" - See fastapi_server.py for REST API deployment")
+ print(" - Check optimization examples for performance tuning")
+
+ except Exception as e:
+ print(f"\nโ Example failed: {e}")
+ logger.exception("Example execution failed")
+ return 1
+
+ return 0
+
+if __name__ == "__main__":
+ exit(main())
+```
+
+### 2. Async Processing (`async_processing.py`)
+
+```python
+#!/usr/bin/env python3
+"""
+Async Processing Example
+
+Demonstrates high-throughput async inference with dynamic batching,
+concurrent processing, and performance optimization.
+"""
+
+import asyncio
+import torch
+import time
+import random
+from pathlib import Path
+import logging
+from typing import List
+
+from framework import create_async_framework
+from framework.core.config import InferenceConfig, BatchConfig, DeviceConfig
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+async def example_1_basic_async():
+ """Example 1: Basic async inference"""
+ print("\n=== Example 1: Basic Async Inference ===")
+
+ # Create async framework
+ framework = await create_async_framework(
+ model_path="models/simple_linear.pt",
+ batch_size=4,
+ max_batch_delay=0.05 # 50ms max batching delay
+ )
+
+ # Single async prediction
+ input_data = torch.randn(1, 784)
+ result = await framework.predict_async(input_data)
+
+ print(f"Async prediction shape: {result.shape}")
+ print(f"Async prediction: {result}")
+
+ await framework.close()
+ return result
+
+async def example_2_concurrent_processing():
+ """Example 2: Concurrent request processing"""
+ print("\n=== Example 2: Concurrent Processing ===")
+
+ framework = await create_async_framework(
+ model_path="models/simple_linear.pt",
+ batch_size=8,
+ max_batch_size=16,
+ max_batch_delay=0.1
+ )
+
+ # Create multiple concurrent requests
+ num_requests = 20
+ concurrent_inputs = [torch.randn(1, 784) for _ in range(num_requests)]
+
+ print(f"Processing {num_requests} concurrent requests...")
+
+ # Submit all requests at once
+ start_time = time.time()
+ tasks = [framework.predict_async(inp) for inp in concurrent_inputs]
+ results = await asyncio.gather(*tasks)
+ end_time = time.time()
+
+ print(f"โ
Processed {len(results)} requests in {end_time - start_time:.3f}s")
+ print(f"Average latency per request: {(end_time - start_time) / num_requests * 1000:.1f}ms")
+ print(f"Throughput: {num_requests / (end_time - start_time):.1f} req/s")
+
+ await framework.close()
+ return results
+
+async def example_3_streaming_processing():
+ """Example 3: Streaming request processing"""
+ print("\n=== Example 3: Streaming Processing ===")
+
+ framework = await create_async_framework(
+ model_path="models/simple_linear.pt",
+ batch_size=4,
+ adaptive_batching=True # Enable adaptive batching
+ )
+
+ async def request_generator():
+ """Generate requests at varying intervals"""
+ for i in range(30):
+ # Simulate varying request rates
+ await asyncio.sleep(random.uniform(0.01, 0.1))
+ yield torch.randn(1, 784), i
+
+ async def process_streaming_requests():
+ """Process requests as they arrive"""
+ results = []
+ async for input_data, request_id in request_generator():
+ result = await framework.predict_async(input_data)
+ results.append((request_id, result))
+
+ if len(results) % 10 == 0:
+ print(f"Processed {len(results)} streaming requests...")
+
+ return results
+
+ print("Processing streaming requests...")
+ start_time = time.time()
+ streaming_results = await process_streaming_requests()
+ end_time = time.time()
+
+ print(f"โ
Processed {len(streaming_results)} streaming requests")
+ print(f"Total time: {end_time - start_time:.3f}s")
+ print(f"Average throughput: {len(streaming_results) / (end_time - start_time):.1f} req/s")
+
+ await framework.close()
+ return streaming_results
+
+async def example_4_batch_optimization():
+ """Example 4: Batch size optimization"""
+ print("\n=== Example 4: Batch Size Optimization ===")
+
+ # Test different batch configurations
+ batch_configs = [
+ {"batch_size": 1, "max_batch_size": 1}, # No batching
+ {"batch_size": 4, "max_batch_size": 8}, # Small batches
+ {"batch_size": 8, "max_batch_size": 16}, # Medium batches
+ {"batch_size": 16, "max_batch_size": 32}, # Large batches
+ ]
+
+ test_requests = [torch.randn(1, 784) for _ in range(50)]
+ results = {}
+
+ for config in batch_configs:
+ print(f"\nTesting batch config: {config}")
+
+ framework = await create_async_framework(
+ model_path="models/simple_linear.pt",
+ **config,
+ max_batch_delay=0.05
+ )
+
+ # Benchmark this configuration
+ start_time = time.time()
+ tasks = [framework.predict_async(inp) for inp in test_requests]
+ batch_results = await asyncio.gather(*tasks)
+ end_time = time.time()
+
+ total_time = end_time - start_time
+ throughput = len(test_requests) / total_time
+
+ results[str(config)] = {
+ "total_time": total_time,
+ "throughput": throughput,
+ "avg_latency": total_time / len(test_requests) * 1000
+ }
+
+ print(f" Total time: {total_time:.3f}s")
+ print(f" Throughput: {throughput:.1f} req/s")
+ print(f" Avg latency: {total_time / len(test_requests) * 1000:.1f}ms")
+
+ await framework.close()
+
+ # Find best configuration
+ best_config = max(results.items(), key=lambda x: x[1]["throughput"])
+ print(f"\n๐ Best configuration: {best_config[0]}")
+ print(f" Throughput: {best_config[1]['throughput']:.1f} req/s")
+
+ return results
+
+async def example_5_error_handling_async():
+ """Example 5: Async error handling"""
+ print("\n=== Example 5: Async Error Handling ===")
+
+ framework = await create_async_framework(
+ model_path="models/simple_linear.pt",
+ batch_size=4
+ )
+
+ # Mix of valid and invalid requests
+ requests = [
+ torch.randn(1, 784), # Valid
+ torch.randn(1, 100), # Invalid shape
+ torch.randn(1, 784), # Valid
+ "invalid_input", # Invalid type
+ torch.randn(1, 784), # Valid
+ ]
+
+ async def safe_predict(inp, request_id):
+ """Safely handle prediction with error catching"""
+ try:
+ result = await framework.predict_async(inp)
+ return {"id": request_id, "status": "success", "result": result}
+ except Exception as e:
+ return {"id": request_id, "status": "error", "error": str(e)}
+
+ # Process all requests with error handling
+ print("Processing mixed valid/invalid requests...")
+ tasks = [safe_predict(inp, i) for i, inp in enumerate(requests)]
+ results = await asyncio.gather(*tasks)
+
+ # Analyze results
+ successful = [r for r in results if r["status"] == "success"]
+ failed = [r for r in results if r["status"] == "error"]
+
+ print(f"โ
Successful requests: {len(successful)}")
+ print(f"โ Failed requests: {len(failed)}")
+
+ for result in results:
+ status_icon = "โ
" if result["status"] == "success" else "โ"
+ if result["status"] == "success":
+ print(f" {status_icon} Request {result['id']}: Success")
+ else:
+ print(f" {status_icon} Request {result['id']}: {result['error']}")
+
+ await framework.close()
+ return results
+
+async def example_6_performance_monitoring():
+ """Example 6: Performance monitoring in async context"""
+ print("\n=== Example 6: Performance Monitoring ===")
+
+ from framework import create_monitored_framework
+
+ # Create monitored async framework
+ framework = await create_monitored_framework(
+ model_path="models/simple_linear.pt",
+ batch_size=8,
+ enable_detailed_metrics=True,
+ async_mode=True
+ )
+
+ # Run sustained load test
+ print("Running sustained load test...")
+ test_duration = 5 # seconds
+
+ async def sustained_load():
+ """Generate sustained load"""
+ results = []
+ start_time = time.time()
+
+ while time.time() - start_time < test_duration:
+ input_data = torch.randn(1, 784)
+ result = await framework.predict_async(input_data)
+ results.append(result)
+
+ # Small delay between requests
+ await asyncio.sleep(0.01)
+
+ return results
+
+ load_results = await sustained_load()
+
+ # Get detailed metrics
+ metrics = await framework.get_metrics_async()
+
+ print(f"\nSustained Load Results:")
+ print(f" Duration: {test_duration}s")
+ print(f" Total requests: {len(load_results)}")
+ print(f" Average throughput: {len(load_results) / test_duration:.1f} req/s")
+
+ if metrics:
+ print(f" Average latency: {metrics.get('latency', {}).get('avg_ms', 0):.2f}ms")
+ print(f" 95th percentile: {metrics.get('latency', {}).get('p95_ms', 0):.2f}ms")
+ print(f" Batch efficiency: {metrics.get('batching', {}).get('efficiency', 0):.1%}")
+
+ await framework.close()
+ return metrics
+
+async def main():
+ """Run all async examples"""
+ print("๐ PyTorch Inference Framework - Async Processing Examples")
+ print("=" * 60)
+
+ # Ensure model exists
+ Path("models").mkdir(exist_ok=True)
+ if not Path("models/simple_linear.pt").exists():
+ print("Creating sample model...")
+ model = torch.nn.Sequential(
+ torch.nn.Linear(784, 128),
+ torch.nn.ReLU(),
+ torch.nn.Linear(128, 10)
+ )
+ model.eval()
+ torch.save(model.state_dict(), "models/simple_linear.pt")
+
+ try:
+ # Run async examples
+ await example_1_basic_async()
+ await example_2_concurrent_processing()
+ await example_3_streaming_processing()
+ await example_4_batch_optimization()
+ await example_5_error_handling_async()
+ await example_6_performance_monitoring()
+
+ print("\n๐ All async examples completed successfully!")
+ print("\nKey takeaways:")
+ print(" - Async processing enables high throughput")
+ print(" - Dynamic batching improves efficiency")
+ print(" - Proper error handling is essential")
+ print(" - Monitor performance for optimization")
+
+ except Exception as e:
+ print(f"\nโ Async example failed: {e}")
+ logger.exception("Async example execution failed")
+ return 1
+
+ return 0
+
+if __name__ == "__main__":
+ exit(asyncio.run(main()))
+```
+
+### 3. FastAPI Server (`fastapi_server.py`)
+
+```python
+#!/usr/bin/env python3
+"""
+FastAPI Production Server Example
+
+Production-ready REST API server with the PyTorch Inference Framework.
+Includes authentication, monitoring, error handling, and documentation.
+"""
+
+from fastapi import FastAPI, File, UploadFile, HTTPException, Depends, status
+from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.middleware.gzip import GZipMiddleware
+from fastapi.responses import JSONResponse
+from pydantic import BaseModel, Field
+from typing import List, Optional, Dict, Any
+import torch
+import torch.nn.functional as F
+import torchvision.transforms as transforms
+from PIL import Image
+import io
+import logging
+import time
+import asyncio
+from pathlib import Path
+
+from framework import create_optimized_framework, create_monitored_framework
+from framework.core.config_manager import get_config_manager
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+# Security
+security = HTTPBearer(auto_error=False)
+
+# Pydantic models
+class PredictionResponse(BaseModel):
+ """Response model for predictions"""
+ prediction: List[float] = Field(..., description="Model predictions")
+ predicted_class: int = Field(..., description="Predicted class index")
+ confidence: float = Field(..., description="Confidence score")
+ processing_time_ms: float = Field(..., description="Processing time in milliseconds")
+ model_info: Dict[str, Any] = Field(..., description="Model information")
+
+class HealthResponse(BaseModel):
+ """Response model for health check"""
+ status: str = Field(..., description="Service status")
+ timestamp: float = Field(..., description="Health check timestamp")
+ version: str = Field(..., description="API version")
+ model_loaded: bool = Field(..., description="Whether model is loaded")
+ predictions_served: int = Field(..., description="Total predictions served")
+ uptime_seconds: float = Field(..., description="Service uptime")
+ performance_metrics: Optional[Dict[str, Any]] = Field(None, description="Performance metrics")
+
+class BatchPredictionRequest(BaseModel):
+ """Request model for batch predictions"""
+ inputs: List[List[float]] = Field(..., description="Batch of input vectors")
+ return_probabilities: bool = Field(True, description="Return probability distributions")
+
+class ConfigResponse(BaseModel):
+ """Response model for configuration info"""
+ environment: str = Field(..., description="Current environment")
+ device: str = Field(..., description="Compute device")
+ batch_size: int = Field(..., description="Batch size")
+ optimization_enabled: bool = Field(..., description="Whether optimization is enabled")
+ features: Dict[str, bool] = Field(..., description="Available features")
+
+# Global state
+app_state = {
+ "framework": None,
+ "startup_time": time.time(),
+ "prediction_count": 0,
+ "config_manager": None
+}
+
+# Create FastAPI app
+app = FastAPI(
+ title="PyTorch Inference API",
+ description="Production-ready PyTorch inference API with optimization and monitoring",
+ version="1.0.0",
+ docs_url="/docs",
+ redoc_url="/redoc",
+ openapi_url="/openapi.json"
+)
+
+# Add middleware
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"], # Configure appropriately for production
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+app.add_middleware(GZipMiddleware, minimum_size=1000)
+
+# Image preprocessing
+image_transform = transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+])
+
+def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)):
+ """Simple token verification (replace with proper auth in production)"""
+ if not credentials:
+ return None
+
+ # Simple token check (use proper JWT validation in production)
+ if credentials.credentials == "demo-token-12345":
+ return {"user": "demo_user", "scope": "read_write"}
+
+ return None
+
+def get_current_user(user = Depends(verify_token)):
+ """Get current authenticated user (optional)"""
+ return user
+
+@app.on_event("startup")
+async def startup_event():
+ """Initialize the inference framework on startup"""
+ logger.info("Starting PyTorch Inference API...")
+
+ try:
+ # Get configuration
+ config_manager = get_config_manager()
+ app_state["config_manager"] = config_manager
+
+ # Initialize framework
+ model_path = "models/simple_linear.pt"
+
+ # Ensure model exists
+ Path("models").mkdir(exist_ok=True)
+ if not Path(model_path).exists():
+ logger.info("Creating sample model...")
+ model = torch.nn.Sequential(
+ torch.nn.Linear(784, 128),
+ torch.nn.ReLU(),
+ torch.nn.Linear(128, 10)
+ )
+ model.eval()
+ torch.save(model.state_dict(), model_path)
+
+ # Create optimized and monitored framework
+ app_state["framework"] = await create_monitored_framework(
+ model_path=model_path,
+ optimization_level="balanced",
+ enable_detailed_metrics=True,
+ async_mode=True
+ )
+
+ logger.info("โ
Framework initialized successfully")
+
+ except Exception as e:
+ logger.error(f"โ Failed to initialize framework: {e}")
+ raise
+
+@app.on_event("shutdown")
+async def shutdown_event():
+ """Cleanup on shutdown"""
+ logger.info("Shutting down PyTorch Inference API...")
+
+ if app_state["framework"]:
+ await app_state["framework"].close()
+
+ logger.info("โ
Shutdown complete")
+
+@app.get("/", response_model=Dict[str, Any])
+async def root():
+ """API information and status"""
+ config_manager = app_state.get("config_manager")
+ uptime = time.time() - app_state["startup_time"]
+
+ return {
+ "message": "PyTorch Inference API",
+ "version": "1.0.0",
+ "status": "healthy",
+ "uptime_seconds": uptime,
+ "environment": config_manager.environment if config_manager else "unknown",
+ "endpoints": {
+ "POST /predict": "Single prediction",
+ "POST /predict/batch": "Batch prediction",
+ "POST /predict/image": "Image classification",
+ "GET /health": "Health check",
+ "GET /config": "Configuration info",
+ "GET /metrics": "Performance metrics",
+ "GET /docs": "API documentation"
+ },
+ "authentication": {
+ "required": False,
+ "demo_token": "demo-token-12345"
+ }
+ }
+
+@app.post("/predict", response_model=PredictionResponse)
+async def predict(
+ input_data: List[float],
+ user = Depends(get_current_user)
+):
+ """Single prediction endpoint"""
+ try:
+ start_time = time.time()
+
+ # Validate input
+ if len(input_data) != 784:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Expected 784 features, got {len(input_data)}"
+ )
+
+ # Convert to tensor
+ input_tensor = torch.tensor(input_data).float().unsqueeze(0)
+
+ # Run inference
+ framework = app_state["framework"]
+ if not framework:
+ raise HTTPException(status_code=503, detail="Model not loaded")
+
+ result = await framework.predict_async(input_tensor)
+
+ # Process results
+ probabilities = F.softmax(result, dim=1)[0]
+ predicted_class = torch.argmax(probabilities).item()
+ confidence = probabilities[predicted_class].item()
+
+ processing_time = (time.time() - start_time) * 1000
+ app_state["prediction_count"] += 1
+
+ return PredictionResponse(
+ prediction=probabilities.tolist(),
+ predicted_class=predicted_class,
+ confidence=confidence,
+ processing_time_ms=processing_time,
+ model_info={
+ "model_type": "linear_classifier",
+ "num_classes": 10,
+ "input_features": 784
+ }
+ )
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Prediction error: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+@app.post("/predict/batch")
+async def predict_batch(
+ request: BatchPredictionRequest,
+ user = Depends(get_current_user)
+):
+ """Batch prediction endpoint"""
+ try:
+ start_time = time.time()
+
+ # Validate inputs
+ if not request.inputs:
+ raise HTTPException(status_code=400, detail="No inputs provided")
+
+ if len(request.inputs) > 100:
+ raise HTTPException(status_code=400, detail="Batch size too large (max 100)")
+
+ # Validate input dimensions
+ for i, inp in enumerate(request.inputs):
+ if len(inp) != 784:
+ raise HTTPException(
+ status_code=400,
+ detail=f"Input {i}: expected 784 features, got {len(inp)}"
+ )
+
+ # Convert to tensor
+ input_tensor = torch.tensor(request.inputs).float()
+
+ # Run batch inference
+ framework = app_state["framework"]
+ if not framework:
+ raise HTTPException(status_code=503, detail="Model not loaded")
+
+ result = await framework.predict_async(input_tensor)
+
+ # Process results
+ if request.return_probabilities:
+ probabilities = F.softmax(result, dim=1)
+ predictions = probabilities.tolist()
+ else:
+ predictions = torch.argmax(result, dim=1).tolist()
+
+ processing_time = (time.time() - start_time) * 1000
+ app_state["prediction_count"] += len(request.inputs)
+
+ return {
+ "predictions": predictions,
+ "batch_size": len(request.inputs),
+ "processing_time_ms": processing_time,
+ "predictions_per_second": len(request.inputs) / (processing_time / 1000)
+ }
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Batch prediction error: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+@app.post("/predict/image")
+async def predict_image(
+ file: UploadFile = File(...),
+ user = Depends(get_current_user)
+):
+ """Image classification endpoint"""
+ try:
+ start_time = time.time()
+
+ # Validate file type
+ if not file.content_type.startswith("image/"):
+ raise HTTPException(
+ status_code=400,
+ detail="File must be an image"
+ )
+
+ # Read and process image
+ image_data = await file.read()
+ image = Image.open(io.BytesIO(image_data)).convert('RGB')
+
+ # Preprocess image
+ input_tensor = image_transform(image).unsqueeze(0)
+
+ # For demo, we'll flatten the image to match our linear model
+ # In practice, you'd use a CNN model
+ flattened_input = input_tensor.view(1, -1)
+
+ # Pad or truncate to 784 features
+ if flattened_input.shape[1] > 784:
+ flattened_input = flattened_input[:, :784]
+ elif flattened_input.shape[1] < 784:
+ padding = torch.zeros(1, 784 - flattened_input.shape[1])
+ flattened_input = torch.cat([flattened_input, padding], dim=1)
+
+ # Run inference
+ framework = app_state["framework"]
+ if not framework:
+ raise HTTPException(status_code=503, detail="Model not loaded")
+
+ result = await framework.predict_async(flattened_input)
+
+ # Process results
+ probabilities = F.softmax(result, dim=1)[0]
+ predicted_class = torch.argmax(probabilities).item()
+ confidence = probabilities[predicted_class].item()
+
+ processing_time = (time.time() - start_time) * 1000
+ app_state["prediction_count"] += 1
+
+ return {
+ "filename": file.filename,
+ "predicted_class": predicted_class,
+ "confidence": confidence,
+ "processing_time_ms": processing_time,
+ "image_info": {
+ "size": image.size,
+ "mode": image.mode,
+ "format": image.format
+ }
+ }
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ logger.error(f"Image prediction error: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+@app.get("/health", response_model=HealthResponse)
+async def health_check():
+ """Health check endpoint"""
+ framework = app_state["framework"]
+ uptime = time.time() - app_state["startup_time"]
+
+ # Get performance metrics if available
+ performance_metrics = None
+ if framework:
+ try:
+ performance_metrics = await framework.get_metrics_async()
+ except:
+ pass # Metrics not available
+
+ return HealthResponse(
+ status="healthy" if framework else "degraded",
+ timestamp=time.time(),
+ version="1.0.0",
+ model_loaded=framework is not None,
+ predictions_served=app_state["prediction_count"],
+ uptime_seconds=uptime,
+ performance_metrics=performance_metrics
+ )
+
+@app.get("/config", response_model=ConfigResponse)
+async def get_config():
+ """Get configuration information"""
+ config_manager = app_state.get("config_manager")
+
+ if not config_manager:
+ raise HTTPException(status_code=503, detail="Configuration not available")
+
+ inference_config = config_manager.get_inference_config()
+
+ return ConfigResponse(
+ environment=config_manager.environment,
+ device=str(inference_config.device.device_type),
+ batch_size=inference_config.batch.batch_size,
+ optimization_enabled=any([
+ inference_config.optimization.enable_tensorrt,
+ inference_config.optimization.enable_quantization,
+ inference_config.optimization.enable_jit
+ ]),
+ features={
+ "async_processing": True,
+ "batch_processing": True,
+ "image_processing": True,
+ "monitoring": True,
+ "authentication": False # Demo only
+ }
+ )
+
+@app.get("/metrics")
+async def get_metrics(user = Depends(get_current_user)):
+ """Get performance metrics (requires authentication in production)"""
+ framework = app_state["framework"]
+
+ if not framework:
+ raise HTTPException(status_code=503, detail="Framework not available")
+
+ try:
+ metrics = await framework.get_metrics_async()
+ return {
+ "metrics": metrics,
+ "total_predictions": app_state["prediction_count"],
+ "uptime_seconds": time.time() - app_state["startup_time"],
+ "timestamp": time.time()
+ }
+ except Exception as e:
+ logger.error(f"Metrics error: {e}")
+ raise HTTPException(status_code=500, detail="Metrics unavailable")
+
+@app.exception_handler(Exception)
+async def global_exception_handler(request, exc):
+ """Global exception handler"""
+ logger.error(f"Unhandled exception: {exc}", exc_info=True)
+ return JSONResponse(
+ status_code=500,
+ content={
+ "error": "Internal server error",
+ "detail": str(exc) if app.debug else "An error occurred"
+ }
+ )
+
+# Development server
+if __name__ == "__main__":
+ import uvicorn
+
+ print("๐ Starting PyTorch Inference API Server")
+ print("๐ API Documentation: http://localhost:8000/docs")
+ print("๐ Health Check: http://localhost:8000/health")
+ print("โ๏ธ Configuration: http://localhost:8000/config")
+ print("๐ Demo Token: demo-token-12345")
+
+ uvicorn.run(
+ "fastapi_server:app",
+ host="0.0.0.0",
+ port=8000,
+ reload=True,
+ log_level="info"
+ )
+```
+
+## ๐ฏ Running the Examples
+
+### Setup
+
+```bash
+# Ensure you're in the project root
+cd torch-inference
+
+# Install dependencies
+uv sync --extra dev
+
+# Create models directory
+mkdir -p models
+
+# Download test models (optional)
+uv run python examples/download_test_models.py
+```
+
+### Running Individual Examples
+
+```bash
+# Basic usage patterns
+uv run python examples/basic_usage.py
+
+# High-performance async processing
+uv run python examples/async_processing.py
+
+# Production REST API server
+uv run python examples/fastapi_server.py
+
+# Test the API (in another terminal)
+curl -X POST "http://localhost:8000/predict" \
+ -H "Content-Type: application/json" \
+ -d '{"input_data": [0.1, 0.2, ...]}' # 784 numbers
+```
+
+### Example Output
+
+When running `basic_usage.py`:
+
+```
+๐ PyTorch Inference Framework - Basic Examples
+==================================================
+
+=== Example 1: Quick Start ===
+Input shape: torch.Size([5, 784])
+Output shape: torch.Size([5, 10])
+Prediction (first sample): tensor([-0.2435, 0.1234, ...])
+Predicted classes: tensor([7, 2, 1, 9, 4])
+
+=== Example 2: Custom Configuration ===
+Input shape: torch.Size([3, 3, 32, 32])
+Output shape: torch.Size([3, 10])
+Configuration used:
+ Device: cpu
+ Batch size: 8
+
+...
+
+๐ All examples completed successfully!
+```
+
+## ๐ Advanced Examples
+
+For more advanced examples, see:
+
+- **[Performance Optimization](optimization-guide.md)** - TensorRT, ONNX, quantization
+- **[Deployment Guide](deployment.md)** - Docker, Kubernetes, scaling
+- **[Monitoring Guide](monitoring.md)** - Production monitoring setup
+- **[API Reference](api.md)** - Complete API documentation
+
+## ๐ก Tips for Using Examples
+
+1. **Start Simple**: Begin with `basic_usage.py` to understand core concepts
+2. **Progress Gradually**: Move to async examples for production workloads
+3. **Customize**: Adapt examples to your specific models and use cases
+4. **Monitor Performance**: Use the monitoring examples to optimize your setup
+5. **Handle Errors**: Learn from error handling examples for robust applications
+
+---
+
+*Ready to build your own application? Use these examples as starting points and refer to the [API Reference](api.md) for detailed documentation.*
diff --git a/docs/installation.md b/docs/installation.md
new file mode 100644
index 0000000..96cb925
--- /dev/null
+++ b/docs/installation.md
@@ -0,0 +1,422 @@
+# ๐ฆ Installation Guide
+
+This guide covers the complete installation process for the PyTorch Inference Framework.
+
+## ๐ฏ Prerequisites
+
+### System Requirements
+- **Python**: 3.10+ (3.11+ recommended)
+- **Operating System**: Windows 10/11, Linux (Ubuntu 18.04+), macOS 10.15+
+- **Memory**: 8GB+ RAM (16GB+ recommended)
+
+### GPU Requirements (Optional but Recommended)
+- **NVIDIA GPU**: Compute capability 7.0+ (RTX 20/30/40 series, Tesla V100, A100, H100)
+- **CUDA**: 12.4+ (for TensorRT optimization)
+- **GPU Memory**: 6GB+ VRAM (8GB+ recommended for large models)
+
+## ๐ Quick Installation with uv
+
+The framework uses `uv` for fast, reliable dependency management.
+
+### 1. Install uv
+
+**Option A: Using pip**
+```bash
+pip install uv
+```
+
+**Option B: Official installer (Linux/macOS)**
+```bash
+curl -LsSf https://astral.sh/uv/install.sh | sh
+```
+
+**Option C: Official installer (Windows)**
+```powershell
+powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
+```
+
+### 2. Clone and Setup
+
+**Windows (PowerShell)**
+```powershell
+# Clone repository
+git clone https://github.com/Evintkoo/torch-inference.git
+cd torch-inference
+
+# Run automated setup
+.\setup_uv.ps1
+
+# Verify installation
+uv run python test_installation.py
+```
+
+**Linux/macOS (Bash)**
+```bash
+# Clone repository
+git clone https://github.com/Evintkoo/torch-inference.git
+cd torch-inference
+
+# Run automated setup
+chmod +x setup_uv.sh
+./setup_uv.sh
+
+# Verify installation
+uv run python test_installation.py
+```
+
+### 3. Manual Setup (Alternative)
+
+```bash
+# Install all dependencies
+uv sync
+
+# Install with GPU support
+uv sync --extra cuda
+
+# Install with all features
+uv sync --extra all
+
+# Verify installation
+uv run python test_installation.py
+```
+
+## ๐ง Installation Options
+
+### Base Installation
+```bash
+# Core framework only
+uv sync
+
+# This includes:
+# - PyTorch CPU
+# - FastAPI
+# - Basic optimization
+```
+
+### GPU Support
+```bash
+# CUDA support
+uv sync --extra cuda
+
+# This adds:
+# - PyTorch CUDA
+# - CUDA optimizations
+# - GPU memory management
+```
+
+### TensorRT Optimization
+```bash
+# TensorRT support
+uv sync --extra tensorrt
+
+# This adds:
+# - TensorRT runtime
+# - torch-tensorrt
+# - Advanced GPU optimization
+```
+
+### Development Setup
+```bash
+# Development tools
+uv sync --extra dev
+
+# This adds:
+# - Testing framework (pytest)
+# - Code formatting (black, ruff)
+# - Type checking (mypy)
+# - Pre-commit hooks
+```
+
+### Complete Installation
+```bash
+# All features
+uv sync --extra all
+
+# Equivalent to:
+uv sync --extra cuda,tensorrt,onnx,dev,docs
+```
+
+## ๐ Python Environment Setup
+
+### Using Conda (Recommended for GPU)
+```bash
+# Create environment with CUDA support
+conda create -n torch-inference python=3.11
+conda activate torch-inference
+
+# Install CUDA toolkit
+conda install pytorch torchvision torchaudio pytorch-cuda=12.1 -c pytorch -c nvidia
+
+# Install framework
+cd torch-inference
+uv sync --no-install-project # Skip PyTorch reinstall
+```
+
+### Using Python Virtual Environment
+```bash
+# Create virtual environment
+python -m venv torch-inference
+source torch-inference/bin/activate # Linux/macOS
+# OR
+torch-inference\Scripts\activate # Windows
+
+# Install framework
+cd torch-inference
+uv sync
+```
+
+### Using uv Managed Environment (Simplest)
+```bash
+# uv manages everything automatically
+cd torch-inference
+uv sync # Creates and manages virtual environment
+uv run python test_installation.py
+```
+
+## ๐ Installation Verification
+
+### Quick Test
+```bash
+uv run python -c "import framework; print('โ
Framework imported successfully')"
+```
+
+### Comprehensive Test
+```bash
+uv run python test_installation.py
+```
+
+Expected output:
+```
+โ
Python environment: OK (3.11.5)
+โ
PyTorch installation: OK (2.8.0+cu124)
+โ
CUDA available: OK (12.4)
+โ
GPU memory: OK (24GB available)
+โ
Framework import: OK
+โ
Basic inference: OK (15.2ms)
+โ
Optimized inference: OK (3.8ms, 4.0x speedup)
+โ
TensorRT available: OK (10.12.0.36)
+โ
ONNX Runtime: OK (1.22.1)
+๐ Installation verification complete!
+```
+
+## ๐จ Troubleshooting Installation
+
+### Common Issues
+
+#### uv command not found
+```bash
+# Add uv to PATH (Linux/macOS)
+export PATH="$HOME/.local/bin:$PATH"
+
+# Or reinstall uv
+pip install --force-reinstall uv
+```
+
+#### CUDA out of memory during installation
+```bash
+# Install without CUDA first
+uv sync
+
+# Then install CUDA components separately
+uv add torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu124
+```
+
+#### TensorRT installation fails
+```bash
+# Skip TensorRT for now
+uv sync --extra cuda # Without TensorRT
+
+# Install TensorRT manually later
+uv add tensorrt torch-tensorrt
+```
+
+#### Permission errors (Windows)
+```powershell
+# Run PowerShell as Administrator
+Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
+
+# Then run setup
+.\setup_uv.ps1
+```
+
+#### Import errors
+```bash
+# Verify environment activation
+uv run which python
+
+# Check installed packages
+uv tree
+
+# Reinstall if needed
+uv sync --reinstall
+```
+
+### Platform-Specific Issues
+
+#### Linux
+```bash
+# Install system dependencies
+sudo apt-get update
+sudo apt-get install build-essential
+
+# For CUDA support
+sudo apt-get install nvidia-cuda-toolkit
+```
+
+#### macOS
+```bash
+# Install Xcode command line tools
+xcode-select --install
+
+# For Metal Performance Shaders (Apple Silicon)
+uv sync --extra mps
+```
+
+#### Windows
+```powershell
+# Install Microsoft C++ Build Tools
+# Download from: https://visualstudio.microsoft.com/visual-cpp-build-tools/
+
+# Or install Visual Studio with C++ support
+```
+
+### Environment Variables
+```bash
+# Set CUDA paths (if needed)
+export CUDA_HOME=/usr/local/cuda
+export PATH=$CUDA_HOME/bin:$PATH
+export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
+
+# Performance optimization
+export TORCH_CUDNN_BENCHMARK=true
+export TORCH_CUDA_ARCH_LIST="7.5;8.0;8.6;8.9;9.0"
+```
+
+## ๐ Installation Benchmarks
+
+### Installation Speed Comparison
+
+| Method | Time | Size | Features |
+|--------|------|------|----------|
+| **uv (base)** | ~30s | 1.2GB | Core only |
+| **uv (cuda)** | ~60s | 3.8GB | GPU support |
+| **uv (all)** | ~120s | 5.2GB | All features |
+| **pip** | ~300s | 6.1GB | Traditional |
+| **conda** | ~480s | 7.8GB | Kitchen sink |
+
+### System Requirements by Usage
+
+| Use Case | RAM | VRAM | Storage | Network |
+|----------|-----|------|---------|---------|
+| **CPU Only** | 4GB | - | 2GB | 1GB |
+| **GPU Basic** | 8GB | 6GB | 5GB | 3GB |
+| **Production** | 16GB | 12GB | 10GB | 5GB |
+| **Development** | 16GB | 12GB | 15GB | 8GB |
+
+## ๐ Updating Installation
+
+### Update Framework
+```bash
+# Update to latest version
+git pull origin main
+uv sync
+
+# Update specific dependencies
+uv add torch@latest
+```
+
+### Clean Installation
+```bash
+# Remove lock file and reinstall
+rm uv.lock
+uv sync
+
+# Complete clean install
+rm -rf .uv_cache/
+uv sync --reinstall
+```
+
+### Migration from pip/conda
+```bash
+# If migrating from existing installation
+pip uninstall torch-inference
+
+# Clean install with uv
+uv sync --reinstall
+```
+
+## ๐ณ Docker Installation
+
+### Pre-built Images
+```bash
+# CPU-only image
+docker pull evintkoo/torch-inference:cpu
+
+# GPU-enabled image
+docker pull evintkoo/torch-inference:gpu
+
+# Run container
+docker run --gpus all -p 8000:8000 evintkoo/torch-inference:gpu
+```
+
+### Build from Source
+```bash
+# Build optimized image
+docker build -t torch-inference-custom .
+
+# Run with GPU support
+docker run --gpus all -p 8000:8000 torch-inference-custom
+```
+
+## โ๏ธ Configuration After Installation
+
+### Set up environment variables
+```bash
+# Create .env file
+cat > .env << EOF
+DEVICE=cuda
+BATCH_SIZE=16
+LOG_LEVEL=INFO
+ENABLE_TENSORRT=true
+EOF
+```
+
+### Configure for your use case
+```bash
+# Copy example configuration
+cp config.example.yaml config.yaml
+
+# Edit for your needs
+nano config.yaml
+```
+
+### Set up monitoring
+```bash
+# Install monitoring dependencies
+uv sync --extra monitoring
+
+# Start monitoring dashboard
+uv run python -m framework.monitoring.dashboard
+```
+
+## ๐ฏ Next Steps
+
+After successful installation:
+
+1. **[Quick Start Guide](quickstart.md)** - Basic usage examples
+2. **[Configuration Guide](configuration.md)** - Customize settings
+3. **[Examples](examples.md)** - Explore use cases
+4. **[API Reference](api.md)** - Detailed documentation
+
+## ๐ Installation Support
+
+If you encounter issues:
+
+- ๐ **Check**: [Troubleshooting Guide](troubleshooting.md)
+- ๐ **Report**: [GitHub Issues](https://github.com/Evintkoo/torch-inference/issues)
+- ๐ฌ **Ask**: [GitHub Discussions](https://github.com/Evintkoo/torch-inference/discussions)
+- ๐ง **Email**: [support@torch-inference.dev](mailto:support@torch-inference.dev)
+
+---
+
+*Installation completed? Check out the [Quick Start Guide](quickstart.md) to begin using the framework!*
diff --git a/docs/quickstart.md b/docs/quickstart.md
new file mode 100644
index 0000000..2af69a1
--- /dev/null
+++ b/docs/quickstart.md
@@ -0,0 +1,571 @@
+# ๐ Quick Start Guide
+
+Get up and running with the PyTorch Inference Framework in minutes. This guide covers basic usage patterns and common scenarios.
+
+## ๐ฏ Prerequisites
+
+Before starting, ensure you have:
+- Python 3.10+ installed
+- Basic familiarity with PyTorch
+- A PyTorch model (we'll help you create one if needed)
+
+## ๐ฆ Installation
+
+### Quick Installation
+```bash
+# Install uv package manager
+pip install uv
+
+# Clone and setup the framework
+git clone https://github.com/Evintkoo/torch-inference.git
+cd torch-inference
+
+# Run automated setup
+uv sync && uv run python test_installation.py
+```
+
+For detailed installation instructions, see the [Installation Guide](installation.md).
+
+## ๐ Your First Inference
+
+### 1. Basic Synchronous Inference
+
+```python
+from framework import create_pytorch_framework
+import torch
+
+# Create a simple test model (or use your own)
+model = torch.nn.Sequential(
+ torch.nn.Linear(10, 20),
+ torch.nn.ReLU(),
+ torch.nn.Linear(20, 5)
+)
+
+# Save the model
+torch.save(model.state_dict(), "simple_model.pt")
+
+# Initialize framework
+framework = create_pytorch_framework(
+ model_path="simple_model.pt",
+ device="cpu" # or "cuda" if you have GPU
+)
+
+# Run inference
+input_data = torch.randn(1, 10)
+result = framework.predict(input_data)
+print(f"Prediction shape: {result.shape}")
+print(f"Prediction: {result}")
+```
+
+### 2. Async High-Performance Inference
+
+```python
+import asyncio
+from framework import create_async_framework
+
+async def async_inference_example():
+ # Initialize async framework
+ framework = await create_async_framework(
+ model_path="simple_model.pt",
+ batch_size=4, # Enable batching
+ max_batch_delay=0.05 # 50ms max batching delay
+ )
+
+ # Single async prediction
+ input_data = torch.randn(1, 10)
+ result = await framework.predict_async(input_data)
+ print(f"Async result: {result.shape}")
+
+ # Batch prediction
+ batch_inputs = [torch.randn(1, 10) for _ in range(8)]
+ batch_results = await framework.predict_batch_async(batch_inputs)
+ print(f"Batch results: {len(batch_results)} predictions")
+
+ # Concurrent predictions (automatically batched)
+ concurrent_inputs = [torch.randn(1, 10) for _ in range(10)]
+ tasks = [framework.predict_async(inp) for inp in concurrent_inputs]
+ concurrent_results = await asyncio.gather(*tasks)
+ print(f"Concurrent results: {len(concurrent_results)} predictions")
+
+ await framework.close()
+
+# Run async example
+asyncio.run(async_inference_example())
+```
+
+### 3. Optimized Inference (Automatic)
+
+```python
+from framework import create_optimized_framework
+
+# Framework automatically selects best optimizations
+framework = create_optimized_framework(
+ model_path="simple_model.pt",
+ optimization_level="aggressive" # auto, balanced, or aggressive
+)
+
+# The framework will:
+# - Auto-detect available optimizations (TensorRT, ONNX, etc.)
+# - Benchmark different optimization methods
+# - Select the fastest configuration
+# - Provide fallbacks if optimizations fail
+
+input_data = torch.randn(4, 10) # Batch input
+result = framework.predict(input_data)
+
+# Get optimization report
+report = framework.get_optimization_report()
+print(f"Selected optimization: {report['best_optimization']}")
+print(f"Performance improvement: {report['speedup']:.1f}x")
+print(f"Memory reduction: {report['memory_reduction']:.1%}")
+```
+
+## ๐ผ๏ธ Image Classification Example
+
+### Working with Real Models
+
+```python
+import torch
+import torchvision.transforms as transforms
+from PIL import Image
+from framework import create_pytorch_framework
+
+# Load a pre-trained model
+model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=True)
+model.eval()
+
+# Save for framework usage
+torch.save(model.state_dict(), "resnet18.pt")
+
+# Initialize framework with optimization
+framework = create_pytorch_framework(
+ model_path="resnet18.pt",
+ device="cuda" if torch.cuda.is_available() else "cpu",
+ enable_optimization=True # Enable automatic optimization
+)
+
+# Image preprocessing
+transform = transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225])
+])
+
+# Load and preprocess image
+image = Image.open("path/to/your/image.jpg")
+input_tensor = transform(image).unsqueeze(0)
+
+# Run inference
+with torch.no_grad():
+ prediction = framework.predict(input_tensor)
+ probabilities = torch.nn.functional.softmax(prediction[0], dim=0)
+
+# Get top 5 predictions
+top5_prob, top5_catid = torch.topk(probabilities, 5)
+for i in range(top5_prob.size(0)):
+ print(f"Class {top5_catid[i]}: {top5_prob[i]:.4f}")
+```
+
+## ๐ REST API Server
+
+### FastAPI Integration
+
+```python
+from fastapi import FastAPI, File, UploadFile, HTTPException
+from framework import create_optimized_framework
+from PIL import Image
+import torch
+import torchvision.transforms as transforms
+import io
+
+# Initialize optimized framework
+framework = create_optimized_framework(
+ model_path="resnet18.pt",
+ optimization_level="balanced"
+)
+
+# Create FastAPI app
+app = FastAPI(
+ title="PyTorch Inference API",
+ description="High-performance image classification API",
+ version="1.0.0"
+)
+
+# Image preprocessing
+transform = transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225])
+])
+
+@app.post("/predict")
+async def predict_image(file: UploadFile = File(...)):
+ """Classify an uploaded image"""
+ try:
+ # Read and preprocess image
+ image_data = await file.read()
+ image = Image.open(io.BytesIO(image_data)).convert('RGB')
+ input_tensor = transform(image).unsqueeze(0)
+
+ # Run inference
+ prediction = await framework.predict_async(input_tensor)
+ probabilities = torch.nn.functional.softmax(prediction[0], dim=0)
+
+ # Get top prediction
+ top_prob, top_class = torch.max(probabilities, 0)
+
+ return {
+ "predicted_class": int(top_class.item()),
+ "confidence": float(top_prob.item()),
+ "filename": file.filename
+ }
+
+ except Exception as e:
+ raise HTTPException(status_code=500, detail=str(e))
+
+@app.get("/health")
+async def health_check():
+ """Health check endpoint"""
+ stats = await framework.get_health_status()
+ return {
+ "status": "healthy",
+ "model_loaded": stats["model_loaded"],
+ "predictions_served": stats["prediction_count"],
+ "average_latency_ms": stats["avg_latency_ms"]
+ }
+
+@app.get("/")
+async def root():
+ """API information"""
+ return {
+ "message": "PyTorch Inference API",
+ "version": "1.0.0",
+ "optimization": framework.get_optimization_info(),
+ "endpoints": {
+ "POST /predict": "Upload image for classification",
+ "GET /health": "API health status",
+ "GET /docs": "Interactive API documentation"
+ }
+ }
+
+# Run with: uvicorn main:app --host 0.0.0.0 --port 8000
+```
+
+### Running the API Server
+
+```bash
+# Install FastAPI dependencies
+uv add fastapi uvicorn python-multipart
+
+# Run the server
+uv run uvicorn main:app --host 0.0.0.0 --port 8000 --reload
+
+# Test the API
+curl -X POST "http://localhost:8000/predict" \
+ -H "accept: application/json" \
+ -H "Content-Type: multipart/form-data" \
+ -F "file=@path/to/image.jpg"
+
+# View interactive docs
+open http://localhost:8000/docs
+```
+
+## ๐ง Configuration Basics
+
+### Environment Variables (.env file)
+
+Create a `.env` file in your project root:
+
+```bash
+# Device Configuration
+DEVICE=cuda # auto, cpu, cuda, mps
+USE_FP16=true # Enable half precision for speed
+
+# Performance Settings
+BATCH_SIZE=8 # Default batch size
+MAX_BATCH_SIZE=32 # Maximum batch size for batching
+WARMUP_ITERATIONS=5 # Model warmup iterations
+
+# Optimization Settings
+ENABLE_TENSORRT=true # Enable TensorRT (requires NVIDIA GPU)
+ENABLE_QUANTIZATION=true # Enable quantization
+ENABLE_JIT=true # Enable JIT compilation
+
+# Server Settings
+HOST=0.0.0.0
+PORT=8000
+LOG_LEVEL=INFO
+```
+
+### YAML Configuration (config.yaml)
+
+```yaml
+device:
+ type: "cuda"
+ use_fp16: true
+ memory_fraction: 0.8
+
+batch:
+ batch_size: 8
+ max_batch_size: 32
+ adaptive_batching: true
+ timeout_seconds: 0.1
+
+optimization:
+ enable_tensorrt: true
+ enable_quantization: true
+ enable_jit: true
+ optimization_level: "balanced"
+
+server:
+ host: "0.0.0.0"
+ port: 8000
+ log_level: "INFO"
+```
+
+### Using Configuration
+
+```python
+from framework.core.config_manager import get_config_manager
+from framework import TorchInferenceFramework
+
+# Load configuration
+config_manager = get_config_manager()
+inference_config = config_manager.get_inference_config()
+
+# Create framework with configuration
+framework = TorchInferenceFramework(config=inference_config)
+framework.load_model("path/to/model.pt", "my_model")
+
+# Configuration is automatically applied
+result = framework.predict(input_data)
+```
+
+## โก Performance Optimization
+
+### Quick Performance Boost
+
+```python
+from framework import create_pytorch_framework
+
+# Automatic optimization (easiest)
+framework = create_pytorch_framework(
+ model_path="your_model.pt",
+ device="cuda", # Use GPU
+ enable_optimization=True # Auto-optimize
+)
+
+# Manual optimization control
+from framework.core.config import InferenceConfig, OptimizationConfig
+
+config = InferenceConfig(
+ model_path="your_model.pt",
+ optimization=OptimizationConfig(
+ enable_tensorrt=True, # 2-5x GPU speedup
+ enable_quantization=True, # 2x memory reduction
+ enable_jit=True, # 20-50% speedup
+ enable_cuda_graphs=True # Consistent low latency
+ )
+)
+
+framework = TorchInferenceFramework(config=config)
+```
+
+### Benchmark Your Performance
+
+```python
+import time
+from framework import create_optimized_framework
+
+# Create optimized framework
+framework = create_optimized_framework(
+ model_path="your_model.pt",
+ optimization_level="aggressive"
+)
+
+# Benchmark inference
+test_input = torch.randn(16, 3, 224, 224) # Batch of 16 images
+
+# Warmup
+for _ in range(10):
+ _ = framework.predict(test_input)
+
+# Benchmark
+num_runs = 100
+start_time = time.time()
+for _ in range(num_runs):
+ result = framework.predict(test_input)
+end_time = time.time()
+
+# Calculate metrics
+total_time = end_time - start_time
+avg_latency = (total_time / num_runs) * 1000 # ms
+throughput = (num_runs * test_input.shape[0]) / total_time # samples/sec
+
+print(f"Average latency: {avg_latency:.1f}ms")
+print(f"Throughput: {throughput:.1f} samples/sec")
+print(f"Batch size: {test_input.shape[0]}")
+
+# Get optimization report
+report = framework.get_optimization_report()
+print(f"Optimization: {report['best_optimization']}")
+print(f"Speedup: {report['speedup']:.1f}x")
+```
+
+## ๐ณ Docker Deployment
+
+### Quick Docker Setup
+
+```bash
+# Build container
+docker build -t my-inference-api .
+
+# Run with GPU support
+docker run --gpus all -p 8000:8000 \
+ -e DEVICE=cuda \
+ -e ENABLE_TENSORRT=true \
+ my-inference-api
+
+# Or use docker-compose
+docker-compose up --build
+```
+
+### Docker Compose (docker-compose.yml)
+
+```yaml
+version: '3.8'
+
+services:
+ inference-api:
+ build: .
+ ports:
+ - "8000:8000"
+ environment:
+ - DEVICE=cuda
+ - BATCH_SIZE=16
+ - ENABLE_TENSORRT=true
+ - LOG_LEVEL=INFO
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - driver: nvidia
+ count: 1
+ capabilities: [gpu]
+ volumes:
+ - ./models:/app/models
+ - ./data:/app/data
+```
+
+## ๐ Monitoring and Debugging
+
+### Built-in Monitoring
+
+```python
+from framework import create_monitored_framework
+
+# Framework with monitoring
+framework = create_monitored_framework(
+ model_path="your_model.pt",
+ enable_detailed_metrics=True
+)
+
+# Run inference
+result = framework.predict(input_data)
+
+# Get performance metrics
+metrics = framework.get_metrics()
+print(f"Latency: {metrics['latency']['avg_ms']:.1f}ms")
+print(f"Throughput: {metrics['throughput']['requests_per_second']:.1f} req/s")
+print(f"Memory usage: {metrics['memory']['gpu_used_gb']:.1f}GB")
+
+# Health check
+health = framework.get_health_status()
+print(f"Status: {health['status']}")
+print(f"Predictions served: {health['prediction_count']}")
+```
+
+### Debug Mode
+
+```python
+from framework import create_pytorch_framework
+import logging
+
+# Enable debug logging
+logging.basicConfig(level=logging.DEBUG)
+
+# Framework with debug info
+framework = create_pytorch_framework(
+ model_path="your_model.pt",
+ debug=True, # Enable debug mode
+ enable_profiling=True # Enable profiling
+)
+
+# Detailed prediction info
+result = framework.predict_with_info(input_data)
+print(f"Prediction: {result['prediction']}")
+print(f"Latency: {result['latency_ms']:.1f}ms")
+print(f"Memory used: {result['memory_mb']:.1f}MB")
+print(f"Optimization: {result['optimization_used']}")
+```
+
+## ๐จ Common Issues and Solutions
+
+### Issue: CUDA out of memory
+```python
+# Solution: Reduce batch size or use CPU
+framework = create_pytorch_framework(
+ model_path="your_model.pt",
+ device="cpu", # Use CPU instead
+ # Or reduce batch size
+ batch_size=4 # Instead of 16
+)
+```
+
+### Issue: Slow first inference
+```python
+# Solution: Enable warmup
+framework = create_pytorch_framework(
+ model_path="your_model.pt",
+ warmup_iterations=10 # Warmup model
+)
+```
+
+### Issue: TensorRT optimization fails
+```python
+# Solution: Use fallback optimization
+from framework.core.config import InferenceConfig, OptimizationConfig
+
+config = InferenceConfig(
+ optimization=OptimizationConfig(
+ enable_tensorrt=False, # Disable TensorRT
+ enable_quantization=True, # Use quantization instead
+ enable_jit=True # Enable JIT compilation
+ )
+)
+```
+
+## ๐ Next Steps
+
+Now that you've got the basics, explore more advanced features:
+
+1. **[Configuration Guide](configuration.md)** - Advanced configuration options
+2. **[Optimization Guide](optimization.md)** - Detailed performance tuning
+3. **[API Reference](api.md)** - Complete API documentation
+4. **[Examples](examples.md)** - More complex use cases
+5. **[Deployment Guide](deployment.md)** - Production deployment
+
+## ๐ Getting Help
+
+- ๐ **Documentation**: Full documentation at [docs/](.)
+- ๐ **Issues**: Report issues on [GitHub](https://github.com/Evintkoo/torch-inference/issues)
+- ๐ฌ **Discussions**: Ask questions on [GitHub Discussions](https://github.com/Evintkoo/torch-inference/discussions)
+- ๐ง **Email**: Contact us at [support@torch-inference.dev](mailto:support@torch-inference.dev)
+
+---
+
+*Ready for production? Check out the [Deployment Guide](deployment.md) for scaling and production best practices.*
diff --git a/docs/testing.md b/docs/testing.md
new file mode 100644
index 0000000..939c465
--- /dev/null
+++ b/docs/testing.md
@@ -0,0 +1,857 @@
+# ๐งช Testing Documentation
+
+This guide covers the comprehensive test suite for the PyTorch Inference Framework, including test structure, running tests, and contributing new tests.
+
+## ๐ Test Overview
+
+The framework includes a robust test suite with:
+- **2000+ test cases** across all modules
+- **90%+ code coverage** on critical paths
+- **Unit, integration, and performance tests**
+- **Mock implementations** for optional dependencies
+- **CI/CD integration** with automated testing
+
+## ๐๏ธ Test Structure
+
+```
+tests/
+โโโ conftest.py # Shared fixtures and configuration
+โโโ __init__.py # Package initialization
+โโโ README.md # Testing documentation
+โโโ fixtures/ # Test data and fixtures
+โโโ unit/ # Unit tests for individual modules
+โ โโโ test_config.py # Configuration system (200+ tests)
+โ โโโ test_base_model.py # Model management (250+ tests)
+โ โโโ test_inference_engine.py # Async inference (300+ tests)
+โ โโโ test_optimizers.py # Optimization modules (350+ tests)
+โ โโโ test_adapters.py # Model adapters (200+ tests)
+โ โโโ test_enterprise.py # Enterprise features (250+ tests)
+โ โโโ test_utils.py # Utility modules (150+ tests)
+โ โโโ test_framework.py # Main framework (400+ tests)
+โโโ integration/ # Integration tests
+โ โโโ test_framework_integration.py # End-to-end workflows (600+ tests)
+โโโ models/ # Test model utilities
+โ โโโ create_test_models.py # Model download/creation
+โ โโโ model_loader.py # Model loading utilities
+โ โโโ README.md # Model documentation
+โโโ run_tests.py # Test runner script
+```
+
+## ๐ Running Tests
+
+### Quick Start
+
+```bash
+# Install test dependencies
+uv sync --extra dev
+
+# Run all tests
+uv run pytest
+
+# Run with coverage
+uv run pytest --cov=framework
+
+# Run specific test categories
+uv run pytest -m unit # Unit tests only
+uv run pytest -m integration # Integration tests only
+uv run pytest -m "not slow" # Skip slow tests
+```
+
+### Using Test Runner Script
+
+```bash
+# Run all tests
+python run_tests.py all
+
+# Run only unit tests
+python run_tests.py unit
+
+# Run only integration tests
+python run_tests.py integration
+
+# Run with coverage reporting
+python run_tests.py coverage
+
+# Run specific test file
+python run_tests.py unit --test-file test_config.py
+
+# Verbose output
+python run_tests.py all --verbose
+
+# Performance benchmarks
+python run_tests.py performance
+```
+
+### Using Helper Scripts
+
+**Windows:**
+```cmd
+test.bat install-dev # Install dependencies
+test.bat test # Run all tests
+test.bat coverage # Run with coverage
+test.bat lint # Run code quality checks
+```
+
+**Unix/Linux/macOS:**
+```bash
+make install-dev # Install dependencies
+make test # Run all tests
+make coverage # Run with coverage
+make lint # Run code quality checks
+```
+
+## ๐ท๏ธ Test Markers
+
+Tests are categorized using pytest markers:
+
+### Performance Markers
+- `unit` - Fast, isolated unit tests
+- `integration` - End-to-end integration tests
+- `slow` - Tests taking >5 seconds
+- `benchmark` - Performance benchmarks
+
+### Technology Markers
+- `gpu` - Tests requiring GPU/CUDA
+- `tensorrt` - Tests requiring TensorRT
+- `onnx` - Tests requiring ONNX runtime
+- `enterprise` - Enterprise feature tests
+
+### Functional Markers
+- `smoke` - Quick validation tests
+- `regression` - Regression tests
+- `security` - Security-related tests
+- `api` - API endpoint tests
+- `model` - Tests requiring real models
+- `mock` - Tests using only mock objects
+
+### Running Specific Categories
+
+```bash
+# Run only fast tests
+uv run pytest -m "not slow and not gpu"
+
+# Run GPU tests
+uv run pytest -m gpu
+
+# Run enterprise tests
+uv run pytest -m enterprise
+
+# Run benchmarks only
+uv run pytest -m benchmark --benchmark-only
+
+# Combine markers
+uv run pytest -m "unit and not slow"
+```
+
+## ๐ Test Categories
+
+### Unit Tests (`tests/unit/`)
+
+#### Configuration Tests (`test_config.py`)
+Tests the configuration management system:
+
+```python
+class TestDeviceConfig:
+ """Test device configuration validation and conversion"""
+
+ def test_device_detection(self):
+ """Test automatic device detection"""
+ config = DeviceConfig(device_type=DeviceType.AUTO)
+ assert config.get_resolved_device() in ["cpu", "cuda", "mps"]
+
+ def test_invalid_device_handling(self):
+ """Test handling of invalid device specifications"""
+ with pytest.raises(ValueError):
+ DeviceConfig(device_type="invalid_device")
+
+class TestConfigManager:
+ """Test configuration manager functionality"""
+
+ def test_environment_variable_override(self, monkeypatch):
+ """Test environment variable precedence"""
+ monkeypatch.setenv("BATCH_SIZE", "32")
+ config_manager = ConfigManager()
+ assert config_manager.get("BATCH_SIZE", default=4) == 32
+
+ def test_yaml_configuration_loading(self, tmp_path):
+ """Test YAML configuration file parsing"""
+ config_file = tmp_path / "config.yaml"
+ config_file.write_text("""
+ batch:
+ batch_size: 16
+ """)
+
+ config_manager = ConfigManager(config_file=config_file)
+ inference_config = config_manager.get_inference_config()
+ assert inference_config.batch.batch_size == 16
+```
+
+#### Model Management Tests (`test_base_model.py`)
+Tests model loading and management:
+
+```python
+class TestBaseModel:
+ """Test base model abstract class"""
+
+ def test_prediction_interface(self, simple_model):
+ """Test model prediction interface"""
+ model = MockModel(simple_model)
+ result = model.predict(torch.randn(1, 10))
+ assert result is not None
+ assert isinstance(result, torch.Tensor)
+
+class TestModelManager:
+ """Test model manager functionality"""
+
+ def test_model_registration(self, model_manager, simple_model):
+ """Test model registration and retrieval"""
+ model_manager.register_model("test_model", simple_model)
+ retrieved_model = model_manager.get_model("test_model")
+ assert retrieved_model is not None
+
+ def test_memory_usage_tracking(self, model_manager, complex_model):
+ """Test memory usage monitoring"""
+ initial_memory = model_manager.get_memory_usage()
+ model_manager.register_model("memory_test", complex_model)
+ final_memory = model_manager.get_memory_usage()
+ assert final_memory > initial_memory
+```
+
+#### Inference Engine Tests (`test_inference_engine.py`)
+Tests async inference capabilities:
+
+```python
+class TestInferenceEngine:
+ """Test async inference engine"""
+
+ @pytest.mark.asyncio
+ async def test_async_prediction(self, inference_engine, sample_input):
+ """Test basic async prediction"""
+ result = await inference_engine.predict_async(sample_input)
+ assert result is not None
+
+ @pytest.mark.asyncio
+ async def test_concurrent_requests(self, inference_engine):
+ """Test handling multiple concurrent requests"""
+ inputs = [torch.randn(1, 10) for _ in range(10)]
+ tasks = [inference_engine.predict_async(inp) for inp in inputs]
+ results = await asyncio.gather(*tasks)
+ assert len(results) == 10
+ assert all(r is not None for r in results)
+
+ @pytest.mark.asyncio
+ async def test_batch_processing(self, inference_engine):
+ """Test dynamic batching functionality"""
+ batch_inputs = [torch.randn(1, 10) for _ in range(5)]
+
+ # Submit requests close together for batching
+ start_time = time.time()
+ tasks = [inference_engine.predict_async(inp) for inp in batch_inputs]
+ results = await asyncio.gather(*tasks)
+ end_time = time.time()
+
+ # Should be faster than individual requests due to batching
+ assert len(results) == 5
+ assert end_time - start_time < 1.0 # Should be fast due to batching
+```
+
+#### Optimizer Tests (`test_optimizers.py`)
+Tests all optimization modules:
+
+```python
+class TestTensorRTOptimizer:
+ """Test TensorRT optimization (with mocks for CI)"""
+
+ def test_tensorrt_optimization_mock(self, simple_model):
+ """Test TensorRT optimization with mock"""
+ optimizer = MockTensorRTOptimizer()
+ optimized_model = optimizer.optimize(simple_model)
+ assert optimized_model is not None
+ assert optimizer.get_optimization_info()["speedup"] > 1.0
+
+ @pytest.mark.gpu
+ @pytest.mark.tensorrt
+ def test_real_tensorrt_optimization(self, simple_model):
+ """Test real TensorRT optimization (requires GPU)"""
+ if not torch.cuda.is_available():
+ pytest.skip("CUDA not available")
+
+ try:
+ import tensorrt
+ optimizer = TensorRTOptimizer()
+ optimized_model = optimizer.optimize(simple_model)
+ assert optimized_model is not None
+ except ImportError:
+ pytest.skip("TensorRT not available")
+
+class TestQuantizationOptimizer:
+ """Test quantization optimization"""
+
+ def test_dynamic_quantization(self, simple_model):
+ """Test dynamic quantization"""
+ optimizer = QuantizationOptimizer()
+ quantized_model = optimizer.quantize_dynamic(simple_model)
+
+ # Check model size reduction
+ original_size = sum(p.numel() * p.element_size() for p in simple_model.parameters())
+ quantized_size = sum(p.numel() * p.element_size() for p in quantized_model.parameters())
+
+ # Should be smaller (though exact reduction depends on model)
+ assert quantized_size <= original_size
+
+ def test_quantization_accuracy(self, simple_model):
+ """Test quantization maintains reasonable accuracy"""
+ optimizer = QuantizationOptimizer()
+ quantized_model = optimizer.quantize_dynamic(simple_model)
+
+ # Test inputs
+ test_input = torch.randn(10, 10)
+ original_output = simple_model(test_input)
+ quantized_output = quantized_model(test_input)
+
+ # Should maintain similar outputs
+ mse = torch.nn.functional.mse_loss(original_output, quantized_output)
+ assert mse < 0.1 # Allow some quantization error
+```
+
+### Integration Tests (`tests/integration/`)
+
+#### End-to-End Tests (`test_framework_integration.py`)
+Tests complete workflows:
+
+```python
+class TestFrameworkIntegration:
+ """Test complete framework integration"""
+
+ @pytest.mark.asyncio
+ async def test_complete_async_workflow(self, temp_model_dir):
+ """Test complete async inference workflow"""
+ # Create and save a test model
+ model = torch.nn.Linear(10, 5)
+ model_path = temp_model_dir / "test_model.pt"
+ torch.save(model.state_dict(), model_path)
+
+ # Initialize framework
+ config = InferenceConfig(
+ model_path=str(model_path),
+ device=DeviceConfig(device_type=DeviceType.CPU),
+ batch=BatchConfig(batch_size=4, max_batch_size=8)
+ )
+
+ framework = TorchInferenceFramework(config=config)
+ await framework.initialize()
+
+ # Test predictions
+ test_inputs = [torch.randn(1, 10) for _ in range(10)]
+
+ # Single predictions
+ result = await framework.predict_async(test_inputs[0])
+ assert result is not None
+ assert result.shape == (1, 5)
+
+ # Batch predictions
+ batch_results = await framework.predict_batch_async(test_inputs[:5])
+ assert len(batch_results) == 5
+
+ # Concurrent predictions
+ tasks = [framework.predict_async(inp) for inp in test_inputs]
+ concurrent_results = await asyncio.gather(*tasks)
+ assert len(concurrent_results) == 10
+
+ # Cleanup
+ await framework.cleanup()
+
+ def test_optimization_pipeline(self, temp_model_dir):
+ """Test optimization pipeline integration"""
+ # Create test model
+ model = create_test_model("resnet_like")
+ model_path = temp_model_dir / "optimization_test.pt"
+ torch.save(model.state_dict(), model_path)
+
+ # Test different optimization configurations
+ optimizations = [
+ {"enable_jit": True},
+ {"enable_quantization": True},
+ {"enable_jit": True, "enable_quantization": True},
+ ]
+
+ results = {}
+
+ for i, opt_config in enumerate(optimizations):
+ config = InferenceConfig(
+ model_path=str(model_path),
+ optimization=OptimizationConfig(**opt_config)
+ )
+
+ framework = TorchInferenceFramework(config=config)
+ framework.initialize()
+
+ # Benchmark performance
+ test_input = torch.randn(4, 3, 224, 224)
+
+ start_time = time.time()
+ for _ in range(10):
+ result = framework.predict(test_input)
+ end_time = time.time()
+
+ results[f"config_{i}"] = {
+ "time": end_time - start_time,
+ "config": opt_config,
+ "output_shape": result.shape
+ }
+
+ framework.cleanup()
+
+ # Verify all configurations worked
+ assert len(results) == len(optimizations)
+ for result in results.values():
+ assert result["output_shape"] is not None
+ assert result["time"] > 0
+```
+
+## ๐ ๏ธ Test Fixtures and Utilities
+
+### Core Fixtures (`conftest.py`)
+
+```python
+@pytest.fixture
+def simple_model():
+ """Simple linear model for testing"""
+ return torch.nn.Sequential(
+ torch.nn.Linear(10, 20),
+ torch.nn.ReLU(),
+ torch.nn.Linear(20, 5)
+ )
+
+@pytest.fixture
+def complex_model():
+ """More complex model for performance testing"""
+ return torch.nn.Sequential(
+ torch.nn.Conv2d(3, 16, 3),
+ torch.nn.ReLU(),
+ torch.nn.AdaptiveAvgPool2d((1, 1)),
+ torch.nn.Flatten(),
+ torch.nn.Linear(16, 10)
+ )
+
+@pytest.fixture
+def inference_config():
+ """Standard inference configuration for testing"""
+ return InferenceConfig(
+ device=DeviceConfig(device_type=DeviceType.CPU),
+ batch=BatchConfig(batch_size=4, max_batch_size=8),
+ optimization=OptimizationConfig(enable_jit=False)
+ )
+
+@pytest.fixture
+async def inference_engine(simple_model, inference_config):
+ """Configured inference engine for testing"""
+ engine = InferenceEngine(inference_config)
+ await engine.initialize()
+ engine.load_model(simple_model, "test_model")
+ yield engine
+ await engine.cleanup()
+
+@pytest.fixture
+def temp_model_dir(tmp_path):
+ """Temporary directory for model files"""
+ model_dir = tmp_path / "models"
+ model_dir.mkdir()
+ return model_dir
+
+@pytest.fixture
+def mock_model_manager():
+ """Pre-configured model manager with mock models"""
+ manager = ModelManager()
+
+ # Add some mock models
+ for i in range(3):
+ mock_model = MockModel(torch.nn.Linear(10, 5))
+ manager.register_model(f"mock_model_{i}", mock_model)
+
+ return manager
+```
+
+### Mock Classes
+
+```python
+class MockModel:
+ """Realistic model behavior for testing"""
+
+ def __init__(self, pytorch_model):
+ self.model = pytorch_model
+ self.model.eval()
+ self.predict_count = 0
+ self.total_inference_time = 0
+
+ def predict(self, input_tensor):
+ """Mock prediction with timing"""
+ start_time = time.time()
+ with torch.no_grad():
+ result = self.model(input_tensor)
+ end_time = time.time()
+
+ self.predict_count += 1
+ self.total_inference_time += (end_time - start_time)
+
+ return result
+
+ def get_statistics(self):
+ """Get mock model statistics"""
+ avg_time = self.total_inference_time / max(self.predict_count, 1)
+ return {
+ "predict_count": self.predict_count,
+ "average_inference_time": avg_time,
+ "total_time": self.total_inference_time
+ }
+
+class MockTensorRTOptimizer:
+ """Mock TensorRT optimizer for testing"""
+
+ def optimize(self, model):
+ """Mock optimization that wraps model"""
+ return OptimizedModelWrapper(model, speedup=3.5)
+
+ def get_optimization_info(self):
+ return {
+ "optimizer": "TensorRT",
+ "speedup": 3.5,
+ "memory_reduction": 0.6
+ }
+```
+
+## โก Performance Testing
+
+### Benchmark Tests
+
+```python
+@pytest.mark.benchmark
+class TestPerformanceBenchmarks:
+ """Performance benchmark tests"""
+
+ def test_inference_latency_benchmark(self, benchmark, simple_model):
+ """Benchmark inference latency"""
+ test_input = torch.randn(1, 10)
+
+ def inference():
+ with torch.no_grad():
+ return simple_model(test_input)
+
+ result = benchmark(inference)
+ assert result is not None
+
+ def test_batch_throughput_benchmark(self, benchmark, simple_model):
+ """Benchmark batch processing throughput"""
+ batch_input = torch.randn(16, 10)
+
+ def batch_inference():
+ with torch.no_grad():
+ return simple_model(batch_input)
+
+ result = benchmark(batch_inference)
+ assert result.shape == (16, 5)
+
+ @pytest.mark.gpu
+ def test_gpu_optimization_benchmark(self, benchmark):
+ """Benchmark GPU optimization performance"""
+ if not torch.cuda.is_available():
+ pytest.skip("CUDA not available")
+
+ model = torch.nn.Linear(1000, 1000).cuda()
+ input_tensor = torch.randn(100, 1000).cuda()
+
+ def gpu_inference():
+ with torch.no_grad():
+ return model(input_tensor)
+
+ result = benchmark(gpu_inference)
+ assert result.device.type == 'cuda'
+```
+
+### Running Benchmarks
+
+```bash
+# Run benchmark tests only
+uv run pytest -m benchmark --benchmark-only
+
+# Save benchmark results
+uv run pytest -m benchmark --benchmark-json=benchmark.json
+
+# Compare benchmarks
+uv run pytest -m benchmark --benchmark-compare=benchmark.json
+
+# Sort benchmarks by mean time
+uv run pytest -m benchmark --benchmark-sort=mean
+```
+
+## ๐ง Test Configuration
+
+### pytest.ini Configuration
+
+```ini
+[tool:pytest]
+minversion = 6.0
+addopts = -ra -q --strict-markers --tb=short
+testpaths = tests
+python_files = test_*.py *_test.py
+python_classes = Test*
+python_functions = test_*
+
+markers =
+ unit: Fast, isolated unit tests
+ integration: Slower end-to-end tests
+ slow: Tests taking >5 seconds
+ gpu: Tests requiring GPU/CUDA
+ tensorrt: Tests requiring TensorRT
+ onnx: Tests requiring ONNX runtime
+ enterprise: Enterprise feature tests
+ benchmark: Performance benchmarks
+ smoke: Quick validation tests
+ regression: Regression tests
+ security: Security-related tests
+ api: API endpoint tests
+ model: Tests requiring real models
+ mock: Tests using only mock objects
+
+filterwarnings =
+ ignore::DeprecationWarning:torch.*
+ ignore::UserWarning:transformers.*
+ error::RuntimeWarning
+ error::ImportWarning
+
+asyncio_mode = auto
+asyncio_default_fixture_loop_scope = function
+
+timeout = 300
+timeout_method = thread
+
+log_cli = true
+log_cli_level = INFO
+log_cli_format = %(asctime)s [%(levelname)8s] %(name)s: %(message)s
+log_cli_date_format = %Y-%m-%d %H:%M:%S
+
+junit_family = xunit2
+```
+
+### Coverage Configuration
+
+```ini
+[tool:coverage:run]
+source = framework
+omit =
+ tests/*
+ framework/__pycache__/*
+ */__pycache__/*
+ setup.py
+ */site-packages/*
+
+[tool:coverage:report]
+exclude_lines =
+ pragma: no cover
+ def __repr__
+ raise AssertionError
+ raise NotImplementedError
+ if __name__ == .__main__.:
+ @abstractmethod
+
+[tool:coverage:html]
+directory = htmlcov
+```
+
+## ๐จ Debugging Tests
+
+### Common Debugging Commands
+
+```bash
+# Stop on first failure
+uv run pytest -x
+
+# Run only failed tests from last run
+uv run pytest --lf
+
+# Run failed tests first
+uv run pytest --ff
+
+# Verbose debugging output
+uv run pytest -vvv --tb=long --showlocals
+
+# Debug specific test
+uv run pytest tests/unit/test_config.py::TestDeviceConfig::test_device_detection -vvv
+
+# Run with pdb on failure
+uv run pytest --pdb
+
+# Run tests matching pattern
+uv run pytest -k "test_async"
+```
+
+### Test Environment Variables
+
+```bash
+# Set test environment
+export ENVIRONMENT=test
+export DEVICE=cpu
+export LOG_LEVEL=DEBUG
+export BATCH_SIZE=1
+export TEST_TIMEOUT=600
+
+# Run tests with environment
+uv run pytest tests/
+```
+
+## ๐ Test Metrics and Reporting
+
+### Coverage Reporting
+
+```bash
+# Generate HTML coverage report
+uv run pytest --cov=framework --cov-report=html
+
+# Open coverage report
+open htmlcov/index.html # macOS
+xdg-open htmlcov/index.html # Linux
+start htmlcov/index.html # Windows
+```
+
+### JUnit XML for CI
+
+```bash
+# Generate JUnit XML for CI systems
+uv run pytest --junitxml=junit.xml
+
+# CI-friendly output
+uv run pytest --tb=short --junit-xml=junit.xml
+```
+
+### Test Statistics
+
+The test runner provides detailed statistics:
+
+```bash
+python run_tests.py all --stats
+
+# Example output:
+# Test Results Summary:
+# =====================
+# Total Tests: 2,147
+# Passed: 2,142 (99.8%)
+# Failed: 0 (0.0%)
+# Skipped: 5 (0.2%)
+# Errors: 0 (0.0%)
+#
+# Coverage: 94.2%
+# Duration: 45.7 seconds
+#
+# Test Categories:
+# - Unit Tests: 1,547 (72.0%)
+# - Integration Tests: 600 (28.0%)
+#
+# Performance Tests: 25
+# Average Inference Time: 2.3ms
+# Memory Usage: 245MB peak
+```
+
+## ๐ Continuous Integration
+
+### GitHub Actions Integration
+
+The test suite integrates with CI/CD:
+
+```yaml
+# .github/workflows/test.yml
+name: Test Suite
+
+on: [push, pull_request]
+
+jobs:
+ test:
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os: [ubuntu-latest, windows-latest, macos-latest]
+ python-version: [3.10, 3.11, 3.12]
+
+ steps:
+ - uses: actions/checkout@v4
+ - uses: astral-sh/setup-uv@v3
+
+ - name: Install dependencies
+ run: uv sync --extra dev
+
+ - name: Run tests
+ run: uv run pytest --cov=framework --junit-xml=junit.xml
+
+ - name: Upload coverage
+ uses: codecov/codecov-action@v4
+ with:
+ file: ./coverage.xml
+```
+
+## ๐ Contributing Tests
+
+### Writing New Tests
+
+1. **Follow naming conventions**: `test_`
+2. **Use existing fixtures**: Leverage `conftest.py` fixtures
+3. **Include docstrings**: Explain test purpose clearly
+4. **Test edge cases**: Include error conditions
+5. **Use appropriate markers**: Categorize tests properly
+
+### Test Review Checklist
+
+- [ ] Tests cover new functionality completely
+- [ ] Proper error case testing included
+- [ ] No external dependencies without mocks
+- [ ] Tests are deterministic and reproducible
+- [ ] Performance impact is acceptable
+- [ ] Documentation updated if needed
+- [ ] Appropriate test markers applied
+
+### Example New Test
+
+```python
+class TestNewFeature:
+ """Test new feature functionality"""
+
+ def test_basic_functionality(self, fixture):
+ """Test basic feature operation"""
+ # Arrange
+ feature = NewFeature(config)
+
+ # Act
+ result = feature.execute(input_data)
+
+ # Assert
+ assert result is not None
+ assert isinstance(result, ExpectedType)
+
+ def test_error_handling(self, fixture):
+ """Test error handling in edge cases"""
+ feature = NewFeature(config)
+
+ with pytest.raises(ExpectedError):
+ feature.execute(invalid_input)
+
+ @pytest.mark.slow
+ def test_performance_requirements(self, fixture):
+ """Test performance meets requirements"""
+ feature = NewFeature(config)
+
+ start_time = time.time()
+ result = feature.execute(large_input)
+ end_time = time.time()
+
+ assert end_time - start_time < 1.0 # Must complete in 1s
+ assert result.quality_score > 0.95 # Must maintain quality
+```
+
+## ๐ Related Documentation
+
+- **[Configuration Guide](configuration.md)** - Test configuration
+- **[API Reference](api.md)** - Testing APIs
+- **[Contributing Guide](contributing.md)** - Development workflow
+- **[Troubleshooting Guide](troubleshooting.md)** - Common test issues
+
+---
+
+*Ready to contribute tests? Check out the [Contributing Guide](contributing.md) for development setup and workflow.*
diff --git a/examples/basic_usage.py b/examples/basic_usage.py
new file mode 100644
index 0000000..ea8c932
--- /dev/null
+++ b/examples/basic_usage.py
@@ -0,0 +1,160 @@
+"""
+Basic Usage Example for PyTorch Inference Framework
+
+This example demonstrates simple synchronous inference patterns
+using the optimized PyTorch inference framework.
+"""
+
+import torch
+import numpy as np
+from pathlib import Path
+
+# Import framework (placeholder - adjust based on actual implementation)
+# from framework import create_pytorch_framework
+
+def basic_inference_example():
+ """
+ Basic synchronous inference example
+ """
+ print("๐ Basic Inference Example")
+ print("=" * 50)
+
+ # Example model path (placeholder)
+ model_path = "path/to/your/model.pt"
+
+ # Create basic framework
+ print("๐ฆ Creating PyTorch framework...")
+ # framework = create_pytorch_framework(
+ # model_path=model_path,
+ # device="cuda" if torch.cuda.is_available() else "cpu"
+ # )
+
+ # Example input data
+ input_data = torch.randn(1, 3, 224, 224) # Example image tensor
+
+ print(f"๐ Input shape: {input_data.shape}")
+ print(f"๐ฅ๏ธ Device: {'CUDA' if torch.cuda.is_available() else 'CPU'}")
+
+ # Single prediction
+ print("\n๐ฎ Single Prediction:")
+ # result = framework.predict(input_data)
+ # print(f"Result: {result}")
+
+ # Batch prediction
+ batch_data = [
+ torch.randn(1, 3, 224, 224),
+ torch.randn(1, 3, 224, 224),
+ torch.randn(1, 3, 224, 224)
+ ]
+
+ print(f"\n๐ฆ Batch Prediction (batch size: {len(batch_data)}):")
+ # results = framework.predict_batch(batch_data)
+ # for i, result in enumerate(results):
+ # print(f" Image {i+1}: {result}")
+
+ print("\nโ
Basic inference example completed!")
+
+def optimized_inference_example():
+ """
+ Example with automatic optimization enabled
+ """
+ print("\nโก Optimized Inference Example")
+ print("=" * 50)
+
+ # Enable automatic optimization
+ # framework = create_pytorch_framework(
+ # model_path="path/to/your/model.pt",
+ # device="cuda" if torch.cuda.is_available() else "cpu",
+ # enable_optimization=True, # Automatic TensorRT/ONNX optimization
+ # optimization_level="balanced" # Options: conservative, balanced, aggressive
+ # )
+
+ # Example input for optimization
+ input_data = torch.randn(1, 3, 224, 224)
+
+ print("๐ง Framework will automatically:")
+ print(" - Detect optimal optimization method")
+ print(" - Apply TensorRT if available")
+ print(" - Fallback to ONNX or JIT compilation")
+ print(" - Benchmark all methods and select best")
+
+ # The framework handles optimization automatically
+ # result = framework.predict(input_data)
+
+ # Get optimization report
+ # report = framework.get_optimization_report()
+ # print(f"\n๐ Optimization Report:")
+ # print(f" Active optimization: {report.get('active_optimization', 'none')}")
+ # print(f" Speedup achieved: {report.get('speedup', 1.0):.2f}x")
+ # print(f" Memory saved: {report.get('memory_reduction', 0):.1%}")
+
+ print("\nโ
Optimized inference example completed!")
+
+def configuration_example():
+ """
+ Example showing different configuration options
+ """
+ print("\n๐ง Configuration Example")
+ print("=" * 50)
+
+ # from framework.core.config import InferenceConfig, DeviceConfig
+
+ # # Create custom configuration
+ # config = InferenceConfig(
+ # model_path="path/to/model.pt",
+ # device=DeviceConfig(
+ # device_type="cuda",
+ # gpu_id=0,
+ # memory_fraction=0.8,
+ # use_fp16=True # Half precision for 2x speedup
+ # ),
+ # batch_size=8,
+ # enable_monitoring=True
+ # )
+
+ # # Create framework with configuration
+ # framework = create_framework(config)
+
+ print("โ๏ธ Configuration options available:")
+ print(" - Device selection (CPU/CUDA/Auto)")
+ print(" - Memory management")
+ print(" - Precision settings (FP32/FP16)")
+ print(" - Batch processing")
+ print(" - Performance monitoring")
+ print(" - Optimization preferences")
+
+ print("\nโ
Configuration example completed!")
+
+if __name__ == "__main__":
+ """
+ Run basic usage examples
+
+ To run this example:
+ uv run python examples/basic_usage.py
+ """
+
+ print("๐ฏ PyTorch Inference Framework - Basic Usage Examples")
+ print("=" * 60)
+
+ # Check PyTorch installation
+ print(f"๐ฅ PyTorch version: {torch.__version__}")
+ print(f"๐ฅ๏ธ CUDA available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+ print(f"๐ GPU device: {torch.cuda.get_device_name(0)}")
+ print(f"๐พ GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB")
+
+ try:
+ # Run examples
+ basic_inference_example()
+ optimized_inference_example()
+ configuration_example()
+
+ print("\n๐ All examples completed successfully!")
+ print("\nNext steps:")
+ print(" - Try async_processing.py for high-throughput processing")
+ print(" - See fastapi_server.py for REST API integration")
+ print(" - Run ../optimization_demo.py for complete optimization showcase")
+
+ except Exception as e:
+ print(f"\nโ Example failed: {e}")
+ print("Make sure to adjust model paths and install required dependencies")
diff --git a/examples/config_example.py b/examples/config_example.py
new file mode 100644
index 0000000..8ecaadd
--- /dev/null
+++ b/examples/config_example.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+"""
+Configuration Example for PyTorch Inference Framework
+
+This script demonstrates how to use the new configuration management system
+with .env files and config.yaml files.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+# Add project root to path
+project_root = Path(__file__).parent.parent
+if str(project_root) not in sys.path:
+ sys.path.insert(0, str(project_root))
+
+from framework.core.config_manager import get_config_manager, ConfigManager
+
+
+def main():
+ """Demonstrate configuration management."""
+ print("๐ง PyTorch Inference Framework - Configuration Example")
+ print("=" * 60)
+
+ # 1. Default configuration (development environment)
+ print("\n1. Loading default configuration...")
+ config_manager = get_config_manager()
+
+ print(f"Environment: {config_manager.environment}")
+ print(f"Config files:")
+ print(f" - .env file: {config_manager.env_file}")
+ print(f" - YAML file: {config_manager.config_file}")
+
+ # 2. Server configuration
+ print("\n2. Server Configuration:")
+ server_config = config_manager.get_server_config()
+ for key, value in server_config.items():
+ print(f" {key}: {value}")
+
+ # 3. Inference configuration
+ print("\n3. Inference Configuration:")
+ inference_config = config_manager.get_inference_config()
+ print(f" Device type: {inference_config.device.device_type.value}")
+ print(f" Device ID: {inference_config.device.device_id}")
+ print(f" Use FP16: {inference_config.device.use_fp16}")
+ print(f" Batch size: {inference_config.batch.batch_size}")
+ print(f" Max batch size: {inference_config.batch.max_batch_size}")
+ print(f" Input size: {inference_config.preprocessing.input_size}")
+ print(f" Warmup iterations: {inference_config.performance.warmup_iterations}")
+
+ # 4. Configuration precedence example
+ print("\n4. Configuration Precedence Example:")
+ print(" Environment Variable -> YAML Config -> Default Value")
+
+ # Test with a sample configuration key
+ batch_size = config_manager.get('BATCH_SIZE', 1, 'batch.batch_size')
+ print(f" Batch size: {batch_size}")
+
+ device_type = config_manager.get('DEVICE', 'cpu', 'device.type')
+ print(f" Device type: {device_type}")
+
+ log_level = config_manager.get('LOG_LEVEL', 'INFO', 'server.log_level')
+ print(f" Log level: {log_level}")
+
+ # 5. Environment-specific configuration
+ print("\n5. Environment-specific Configuration:")
+
+ # Test different environments
+ for env in ['development', 'staging', 'production']:
+ print(f"\n {env.upper()} Environment:")
+ env_config_manager = ConfigManager(environment=env)
+ env_server_config = env_config_manager.get_server_config()
+ print(f" Reload: {env_server_config['reload']}")
+ print(f" Log level: {env_server_config['log_level']}")
+ print(f" Workers: {env_server_config['workers']}")
+
+ # 6. Enterprise configuration (if available)
+ print("\n6. Enterprise Configuration:")
+ enterprise_config = config_manager.get_enterprise_config()
+ if enterprise_config:
+ print(f" Environment: {enterprise_config.environment}")
+ print(f" Auth provider: {enterprise_config.auth.provider.value}")
+ print(f" Rate limiting: {enterprise_config.security.enable_rate_limiting}")
+ else:
+ print(" Enterprise features disabled or not available")
+
+ # 7. Configuration export
+ print("\n7. Configuration Export:")
+ exported_config = config_manager.export_config()
+ print(f" Environment: {exported_config['environment']}")
+ print(f" Env file: {exported_config['env_file']}")
+ print(f" Config file: {exported_config['config_file']}")
+
+ print("\nโ
Configuration example completed!")
+ print("\n๐ก Tips:")
+ print(" - Modify .env file to override environment variables")
+ print(" - Modify config.yaml to change base configuration")
+ print(" - Set ENVIRONMENT=production to use production settings")
+ print(" - Check /config endpoint when running the server")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/config_modification_examples.py b/examples/config_modification_examples.py
new file mode 100644
index 0000000..c306547
--- /dev/null
+++ b/examples/config_modification_examples.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+"""
+Configuration Modification Examples
+
+This script demonstrates how to modify configuration through environment variables
+and see the effects on the application behavior.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+# Add project root to path
+project_root = Path(__file__).parent.parent
+if str(project_root) not in sys.path:
+ sys.path.insert(0, str(project_root))
+
+from framework.core.config_manager import ConfigManager
+
+
+def test_configuration_changes():
+ """Test different configuration scenarios."""
+
+ print("๐ง Configuration Modification Examples")
+ print("=" * 50)
+
+ # Example 1: Default configuration
+ print("\n1. Default Configuration (development):")
+ config_manager = ConfigManager(environment='development')
+ inference_config = config_manager.get_inference_config()
+ print(f" Device: {inference_config.device.device_type.value}")
+ print(f" Batch size: {inference_config.batch.batch_size}")
+ print(f" Use FP16: {inference_config.device.use_fp16}")
+ print(f" Log level: {inference_config.performance.log_level}")
+
+ # Example 2: Production configuration
+ print("\n2. Production Configuration:")
+ config_manager_prod = ConfigManager(environment='production')
+ inference_config_prod = config_manager_prod.get_inference_config()
+ print(f" Device: {inference_config_prod.device.device_type.value}")
+ print(f" Batch size: {inference_config_prod.batch.batch_size}")
+ print(f" Use FP16: {inference_config_prod.device.use_fp16}")
+ print(f" Log level: {inference_config_prod.performance.log_level}")
+
+ # Example 3: Environment variable overrides
+ print("\n3. Environment Variable Override Example:")
+ print(" Setting environment variables...")
+
+ # Set some environment variables to override configuration
+ os.environ['DEVICE'] = 'cuda'
+ os.environ['BATCH_SIZE'] = '8'
+ os.environ['USE_FP16'] = 'true'
+ os.environ['LOG_LEVEL'] = 'DEBUG'
+
+ # Create new config manager to pick up changes
+ config_manager_override = ConfigManager(environment='development')
+ inference_config_override = config_manager_override.get_inference_config()
+
+ print(f" Device: {inference_config_override.device.device_type.value}")
+ print(f" Batch size: {inference_config_override.batch.batch_size}")
+ print(f" Use FP16: {inference_config_override.device.use_fp16}")
+ print(f" Log level: {inference_config_override.performance.log_level}")
+
+ # Clean up environment variables
+ for key in ['DEVICE', 'BATCH_SIZE', 'USE_FP16', 'LOG_LEVEL']:
+ if key in os.environ:
+ del os.environ[key]
+
+ # Example 4: Server configuration changes
+ print("\n4. Server Configuration Examples:")
+
+ environments = ['development', 'staging', 'production']
+ for env in environments:
+ config_mgr = ConfigManager(environment=env)
+ server_config = config_mgr.get_server_config()
+ print(f" {env.upper()}:")
+ print(f" Host: {server_config['host']}")
+ print(f" Port: {server_config['port']}")
+ print(f" Reload: {server_config['reload']}")
+ print(f" Workers: {server_config['workers']}")
+
+ print("\nโ
Configuration modification examples completed!")
+
+ print("\n๐ How to modify configuration:")
+ print(" 1. Edit .env file to change environment variables")
+ print(" 2. Edit config.yaml to change base configuration")
+ print(" 3. Set ENVIRONMENT variable to change environment")
+ print(" 4. Override individual values with environment variables")
+
+ print("\n๐ Examples:")
+ print(" # To use CUDA with larger batches:")
+ print(" echo 'DEVICE=cuda' >> .env")
+ print(" echo 'BATCH_SIZE=16' >> .env")
+ print(" echo 'USE_FP16=true' >> .env")
+ print()
+ print(" # To run in production mode:")
+ print(" echo 'ENVIRONMENT=production' >> .env")
+ print()
+ print(" # To enable debug logging:")
+ print(" echo 'LOG_LEVEL=DEBUG' >> .env")
+
+
+if __name__ == "__main__":
+ test_configuration_changes()
diff --git a/examples/enterprise_example.py b/examples/enterprise_example.py
new file mode 100644
index 0000000..899936b
--- /dev/null
+++ b/examples/enterprise_example.py
@@ -0,0 +1,571 @@
+#!/usr/bin/env python3
+"""
+Enterprise PyTorch Inference Framework - Complete Example
+
+This example demonstrates all enterprise features including:
+- Authentication and authorization
+- Security and encryption
+- Monitoring and observability
+- Model governance and MLOps
+- High availability deployment
+
+Usage:
+ python enterprise_example.py
+"""
+
+import asyncio
+import os
+import logging
+import numpy as np
+from pathlib import Path
+from typing import Dict, Any, List
+import torch
+import time
+
+# Add framework to path
+import sys
+sys.path.insert(0, str(Path(__file__).parent))
+
+from framework.enterprise import (
+ EnterpriseInferenceEngine,
+ EnterpriseConfig,
+ EnterpriseAuth,
+ SecurityManager,
+ EnterpriseMonitor,
+ ModelGovernance
+)
+
+logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+logger = logging.getLogger(__name__)
+
+
+class SimpleModel(torch.nn.Module):
+ """Simple PyTorch model for demonstration."""
+
+ def __init__(self, input_size: int = 784, num_classes: int = 10):
+ super().__init__()
+ self.fc1 = torch.nn.Linear(input_size, 128)
+ self.fc2 = torch.nn.Linear(128, 64)
+ self.fc3 = torch.nn.Linear(64, num_classes)
+ self.relu = torch.nn.ReLU()
+ self.dropout = torch.nn.Dropout(0.2)
+
+ def forward(self, x):
+ x = x.view(x.size(0), -1)
+ x = self.relu(self.fc1(x))
+ x = self.dropout(x)
+ x = self.relu(self.fc2(x))
+ x = self.dropout(x)
+ x = self.fc3(x)
+ return torch.nn.functional.softmax(x, dim=1)
+
+
+class EnterpriseDemo:
+ """Enterprise inference framework demonstration."""
+
+ def __init__(self):
+ # Setup paths
+ self.model_path = Path("models")
+ self.model_path.mkdir(exist_ok=True)
+
+ # Initialize components
+ self.config = self._setup_config()
+ self.engine = None
+ self.auth = None
+ self.security = None
+ self.monitor = None
+ self.governance = None
+
+ def _setup_config(self) -> EnterpriseConfig:
+ """Setup enterprise configuration."""
+ config = EnterpriseConfig()
+
+ # Override for demo
+ config.auth.enable_mfa = False # Disable MFA for demo
+ config.auth.jwt_expiry_hours = 24
+ config.monitoring.enable_distributed_tracing = True
+ config.monitoring.metrics_port = 9090
+ config.security.enable_encryption = True
+ config.governance.enable_model_validation = True
+
+ return config
+
+ async def setup_enterprise_components(self):
+ """Initialize all enterprise components."""
+ logger.info("Initializing enterprise components...")
+
+ # Authentication
+ self.auth = EnterpriseAuth(self.config)
+ logger.info("โ Authentication system initialized")
+
+ # Security
+ self.security = SecurityManager(self.config)
+ logger.info("โ Security manager initialized")
+
+ # Monitoring
+ self.monitor = EnterpriseMonitor(self.config)
+ logger.info("โ Monitoring system initialized")
+
+ # Model Governance
+ self.governance = ModelGovernance(self.config, self.monitor)
+ logger.info("โ Model governance initialized")
+
+ # Enterprise Engine
+ self.engine = EnterpriseInferenceEngine(self.config)
+ logger.info("โ Enterprise inference engine initialized")
+
+ logger.info("๐ All enterprise components ready!")
+
+ def create_sample_model(self) -> str:
+ """Create and save a sample model."""
+ logger.info("Creating sample model...")
+
+ model = SimpleModel()
+ model.eval()
+
+ # Save model
+ model_path = self.model_path / "sample_model.pt"
+ torch.save({
+ 'model_state_dict': model.state_dict(),
+ 'model_class': 'SimpleModel',
+ 'input_shape': [1, 28, 28],
+ 'output_classes': 10,
+ 'version': '1.0.0'
+ }, model_path)
+
+ logger.info(f"โ Sample model saved to {model_path}")
+ return str(model_path)
+
+ def setup_users_and_roles(self):
+ """Setup demo users and roles."""
+ logger.info("Setting up demo users and roles...")
+
+ # Create admin user
+ admin_user = self.auth.create_user(
+ username="admin",
+ email="admin@company.com",
+ full_name="System Administrator",
+ password="admin123", # Use strong passwords in production!
+ roles=["admin"]
+ )
+ logger.info(f"โ Created admin user: {admin_user.username}")
+
+ # Create data scientist user
+ ds_user = self.auth.create_user(
+ username="data_scientist",
+ email="ds@company.com",
+ full_name="Data Scientist",
+ password="ds123",
+ roles=["data_scientist"]
+ )
+ logger.info(f"โ Created data scientist user: {ds_user.username}")
+
+ # Create regular user
+ user = self.auth.create_user(
+ username="user1",
+ email="user1@company.com",
+ full_name="Regular User",
+ password="user123",
+ roles=["user"]
+ )
+ logger.info(f"โ Created regular user: {user.username}")
+
+ # Create API key for programmatic access
+ api_key = self.auth.create_api_key(
+ name="Integration API Key",
+ user_id=admin_user.id,
+ scopes=["inference:predict", "model:read", "metrics:read"]
+ )
+ logger.info(f"โ Created API key: {api_key.name}")
+
+ return {
+ 'admin_user': admin_user,
+ 'ds_user': ds_user,
+ 'regular_user': user,
+ 'api_key': api_key
+ }
+
+ async def demonstrate_authentication(self, users: Dict):
+ """Demonstrate authentication features."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ AUTHENTICATION DEMONSTRATION")
+ logger.info("="*50)
+
+ # Login with username/password
+ admin_token = await self.auth.authenticate_user("admin", "admin123")
+ logger.info(f"โ Admin login successful, token: {admin_token[:20]}...")
+
+ # Validate token
+ claims = await self.auth.validate_token(admin_token)
+ logger.info(f"โ Token validation successful for user: {claims.get('sub')}")
+
+ # Check permissions
+ has_admin_perm = await self.auth.check_permission(admin_token, "admin:*")
+ logger.info(f"โ Admin has admin permissions: {has_admin_perm}")
+
+ # Test API key authentication
+ api_key = users['api_key']
+ key_valid = await self.auth.validate_api_key(api_key.key)
+ logger.info(f"โ API key validation: {key_valid is not None}")
+
+ # Test unauthorized access
+ try:
+ user_token = await self.auth.authenticate_user("user1", "user123")
+ has_admin_perm = await self.auth.check_permission(user_token, "admin:delete_model")
+ logger.info(f"โ Regular user admin permissions: {has_admin_perm}")
+ except Exception as e:
+ logger.info(f"โ Unauthorized access properly blocked: {type(e).__name__}")
+
+ async def demonstrate_security(self):
+ """Demonstrate security features."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ก๏ธ SECURITY DEMONSTRATION")
+ logger.info("="*50)
+
+ # Input validation
+ test_data = {"model": "test", "input": [1, 2, 3]}
+ is_valid, validation_result = self.security.validate_request("test_client", test_data)
+ logger.info(f"โ Input validation result: {is_valid}")
+
+ # Data encryption
+ sensitive_data = "This is sensitive model data"
+ encrypted = self.security.encrypt_data(sensitive_data)
+ decrypted = self.security.decrypt_data(encrypted)
+ logger.info(f"โ Encryption/Decryption successful: {decrypted == sensitive_data}")
+
+ # Rate limiting test
+ client_id = "test_client"
+ for i in range(5):
+ allowed = self.security.rate_limiter.check_rate_limit(client_id)
+ logger.info(f"Request {i+1} allowed: {allowed}")
+ if not allowed:
+ logger.info("โ Rate limiting working correctly")
+ break
+
+ # Threat detection simulation
+ suspicious_patterns = [
+ "'; DROP TABLE users; --", # SQL injection
+ "", # XSS
+ "../../../etc/passwd", # Path traversal
+ ]
+
+ for pattern in suspicious_patterns:
+ threat_detected = self.security.threat_detector.detect_threats({"input": pattern})
+ logger.info(f"โ Threat detection for '{pattern[:20]}...': {threat_detected['threat_detected']}")
+
+ async def demonstrate_monitoring(self):
+ """Demonstrate monitoring and observability."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ MONITORING DEMONSTRATION")
+ logger.info("="*50)
+
+ # Health check
+ health_status = self.monitor.get_health_status()
+ logger.info(f"โ System health status: {health_status['status']}")
+
+ # Custom metrics
+ self.monitor.record_inference_metrics(
+ model_name="sample_model",
+ version="1.0.0",
+ latency=0.15,
+ memory_usage=256,
+ gpu_utilization=75.5
+ )
+ logger.info("โ Custom metrics recorded")
+
+ # Distributed tracing simulation
+ with self.monitor.create_trace_span("inference_request") as span:
+ span.set_attribute("model.name", "sample_model")
+ span.set_attribute("request.size", 1024)
+
+ # Simulate some work
+ await asyncio.sleep(0.1)
+
+ with self.monitor.create_trace_span("model_prediction", parent=span) as child_span:
+ child_span.set_attribute("prediction.confidence", 0.95)
+ await asyncio.sleep(0.05)
+
+ logger.info("โ Distributed tracing spans created")
+
+ # Metrics collection
+ current_metrics = self.monitor.get_metrics()
+ logger.info(f"โ Current metrics collected: {len(current_metrics)} metrics")
+
+ # Alert simulation (would normally integrate with external systems)
+ alert_sent = self.monitor.send_alert(
+ "high_latency",
+ "Inference latency exceeded threshold",
+ {"model": "sample_model", "latency": 2.5}
+ )
+ logger.info(f"โ Alert sent: {alert_sent}")
+
+ async def demonstrate_model_governance(self, model_path: str):
+ """Demonstrate model governance and MLOps."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ฏ MODEL GOVERNANCE DEMONSTRATION")
+ logger.info("="*50)
+
+ # Register model
+ model_info = await self.governance.register_model(
+ name="sample_classifier",
+ framework="pytorch",
+ version="1.0.0",
+ file_path=model_path,
+ description="Sample classification model for demonstration",
+ metadata={
+ "accuracy": 0.95,
+ "f1_score": 0.93,
+ "training_dataset": "demo_dataset_v1",
+ "hyperparameters": {
+ "learning_rate": 0.001,
+ "batch_size": 32,
+ "epochs": 100
+ }
+ }
+ )
+ logger.info(f"โ Model registered: {model_info['name']} v{model_info['version']}")
+
+ # Model validation
+ is_valid = await self.governance.validate_model(model_info['id'])
+ logger.info(f"โ Model validation result: {is_valid}")
+
+ # Start experiment
+ experiment = await self.governance.start_experiment(
+ name="model_performance_test",
+ description="Testing model performance with new data",
+ model_id=model_info['id'],
+ hyperparameters={
+ "test_batch_size": 64,
+ "confidence_threshold": 0.8
+ }
+ )
+ logger.info(f"โ Experiment started: {experiment['name']}")
+
+ # Log experiment results
+ await self.governance.log_experiment_result(
+ experiment['id'],
+ metrics={
+ "accuracy": 0.96,
+ "precision": 0.94,
+ "recall": 0.95,
+ "inference_time": 0.12
+ },
+ artifacts=["confusion_matrix.png", "roc_curve.png"]
+ )
+ logger.info("โ Experiment results logged")
+
+ # A/B testing setup
+ ab_test = await self.governance.setup_ab_test(
+ name="model_v1_vs_v2",
+ control_model_id=model_info['id'],
+ treatment_model_version="1.1.0",
+ traffic_split=0.2 # 20% to new version
+ )
+ logger.info(f"โ A/B test configured: {ab_test['name']}")
+
+ # Model deployment
+ deployment = await self.governance.deploy_model(
+ model_id=model_info['id'],
+ environment="staging",
+ config={
+ "replicas": 2,
+ "cpu_limit": "2",
+ "memory_limit": "4Gi",
+ "gpu_required": True
+ }
+ )
+ logger.info(f"โ Model deployed to staging: {deployment['deployment_id']}")
+
+ async def demonstrate_enterprise_inference(self, users: Dict, model_path: str):
+ """Demonstrate enterprise inference with security and monitoring."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ ENTERPRISE INFERENCE DEMONSTRATION")
+ logger.info("="*50)
+
+ # Load model into engine
+ await self.engine.load_model(model_path, "sample_model")
+ logger.info("โ Model loaded into enterprise engine")
+
+ # Prepare sample data
+ sample_input = torch.randn(1, 28, 28) # MNIST-like input
+
+ # Authenticated inference with admin user
+ admin_token = await self.auth.authenticate_user("admin", "admin123")
+
+ start_time = time.time()
+ result = await self.engine.secure_predict(
+ model_name="sample_model",
+ input_data=sample_input.numpy().tolist(),
+ auth_token=admin_token,
+ client_id="demo_client",
+ trace_id="demo_trace_001"
+ )
+ inference_time = time.time() - start_time
+
+ logger.info(f"โ Secure inference completed in {inference_time:.3f}s")
+ logger.info(f" Prediction shape: {np.array(result['prediction']).shape}")
+ logger.info(f" Confidence: {max(result['prediction']):.3f}")
+ logger.info(f" Trace ID: {result['trace_id']}")
+
+ # Test batch inference
+ batch_input = torch.randn(5, 28, 28)
+ batch_result = await self.engine.secure_batch_predict(
+ model_name="sample_model",
+ input_data=batch_input.numpy().tolist(),
+ auth_token=admin_token,
+ client_id="demo_client"
+ )
+ logger.info(f"โ Batch inference completed: {len(batch_result['predictions'])} predictions")
+
+ # Test unauthorized access
+ try:
+ user_token = await self.auth.authenticate_user("user1", "user123")
+ # This should work as regular users have inference permission
+ user_result = await self.engine.secure_predict(
+ model_name="sample_model",
+ input_data=sample_input.numpy().tolist(),
+ auth_token=user_token,
+ client_id="user_client"
+ )
+ logger.info("โ Regular user inference successful")
+ except Exception as e:
+ logger.info(f"โ Regular user access denied: {e}")
+
+ # Test API key access
+ api_key = users['api_key']
+ api_result = await self.engine.secure_predict(
+ model_name="sample_model",
+ input_data=sample_input.numpy().tolist(),
+ api_key=api_key.key,
+ client_id="api_client"
+ )
+ logger.info("โ API key inference successful")
+
+ async def demonstrate_compliance_and_audit(self):
+ """Demonstrate compliance and audit features."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ COMPLIANCE & AUDIT DEMONSTRATION")
+ logger.info("="*50)
+
+ # Generate compliance report
+ compliance_report = await self.engine.generate_compliance_report(
+ start_date="2024-01-01",
+ end_date="2024-12-31",
+ include_sections=["security", "data_protection", "model_governance", "audit_logs"]
+ )
+
+ logger.info("โ Compliance report generated")
+ logger.info(f" Report ID: {compliance_report['report_id']}")
+ logger.info(f" Sections: {', '.join(compliance_report['sections'])}")
+ logger.info(f" Total findings: {compliance_report['summary']['total_findings']}")
+
+ # Audit trail
+ audit_events = self.security.audit_logger.get_recent_events(limit=10)
+ logger.info(f"โ Recent audit events: {len(audit_events)} events")
+
+ for event in audit_events[-3:]: # Show last 3 events
+ logger.info(f" - {event['action']} by {event.get('user', 'system')} at {event['timestamp']}")
+
+ # Data lineage tracking
+ lineage_info = await self.governance.get_data_lineage("sample_model")
+ logger.info("โ Data lineage information retrieved")
+ logger.info(f" Source datasets: {len(lineage_info.get('datasets', []))}")
+ logger.info(f" Processing steps: {len(lineage_info.get('processing_steps', []))}")
+
+ async def demonstrate_high_availability(self):
+ """Demonstrate high availability features."""
+ logger.info("\n" + "="*50)
+ logger.info("๐ HIGH AVAILABILITY DEMONSTRATION")
+ logger.info("="*50)
+
+ # Health checks
+ health = await self.engine.health_check()
+ logger.info(f"โ Health check: {health['status']}")
+ logger.info(f" Uptime: {health.get('uptime', 'N/A')}")
+ logger.info(f" Memory usage: {health.get('memory_usage', 'N/A')}")
+ logger.info(f" GPU availability: {health.get('gpu_available', False)}")
+
+ # Readiness check
+ readiness = await self.engine.readiness_check()
+ logger.info(f"โ Readiness check: {readiness['ready']}")
+
+ # Circuit breaker simulation
+ logger.info("Testing circuit breaker (simulating failures)...")
+ failure_count = 0
+ for i in range(10):
+ try:
+ # Simulate some failures
+ if i % 3 == 0: # Every 3rd request fails
+ raise Exception("Simulated failure")
+ logger.info(f" Request {i+1}: Success")
+ except Exception:
+ failure_count += 1
+ logger.info(f" Request {i+1}: Failed ({failure_count} failures)")
+
+ logger.info(f"โ Circuit breaker simulation completed: {failure_count} failures handled")
+
+ # Load balancing simulation
+ logger.info("Simulating load balancing across multiple replicas...")
+ for i in range(5):
+ replica_id = f"replica_{(i % 3) + 1}"
+ logger.info(f" Request {i+1} -> {replica_id}")
+
+ logger.info("โ Load balancing simulation completed")
+
+ async def run_complete_demo(self):
+ """Run the complete enterprise demonstration."""
+ logger.info("๐ฌ Starting Enterprise PyTorch Inference Framework Demo")
+ logger.info("=" * 60)
+
+ try:
+ # Setup
+ await self.setup_enterprise_components()
+ model_path = self.create_sample_model()
+ users = self.setup_users_and_roles()
+
+ # Demonstrations
+ await self.demonstrate_authentication(users)
+ await self.demonstrate_security()
+ await self.demonstrate_monitoring()
+ await self.demonstrate_model_governance(model_path)
+ await self.demonstrate_enterprise_inference(users, model_path)
+ await self.demonstrate_compliance_and_audit()
+ await self.demonstrate_high_availability()
+
+ # Final summary
+ logger.info("\n" + "="*60)
+ logger.info("๐ ENTERPRISE DEMO COMPLETED SUCCESSFULLY!")
+ logger.info("="*60)
+ logger.info("\nโ
All enterprise features demonstrated:")
+ logger.info(" ๐ Authentication & Authorization")
+ logger.info(" ๐ก๏ธ Security & Encryption")
+ logger.info(" ๐ Monitoring & Observability")
+ logger.info(" ๐ฏ Model Governance & MLOps")
+ logger.info(" ๐ Secure Inference Engine")
+ logger.info(" ๐ Compliance & Audit")
+ logger.info(" ๐ High Availability")
+
+ logger.info("\n๐ Ready for production deployment!")
+ logger.info("\nNext steps:")
+ logger.info(" 1. Run setup_enterprise.py for full deployment")
+ logger.info(" 2. Configure production secrets and certificates")
+ logger.info(" 3. Deploy using Docker Compose or Kubernetes")
+ logger.info(" 4. Setup monitoring dashboards and alerts")
+ logger.info(" 5. Configure backup and disaster recovery")
+
+ except Exception as e:
+ logger.error(f"โ Demo failed with error: {e}")
+ raise
+
+
+async def main():
+ """Main function to run the enterprise demo."""
+ demo = EnterpriseDemo()
+ await demo.run_complete_demo()
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/fie_structure.txt b/fie_structure.txt
deleted file mode 100644
index adbabf5..0000000
--- a/fie_structure.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-inference_engine/
-โโโ core/
-โ โโโ __init__.py
-โ โโโ engine.py
-โ โโโ preprocessor.py
-โ โโโ postprocessor.py
-โโโ utils/
-โ โโโ __init__.py
-โ โโโ logger.py
-โ โโโ monitor.py
-โ โโโ config.py
-โโโ api/
-โ โโโ __init__.py
-โ โโโ server.py
-โ โโโ schemas.py
-โโโ tests/
-โ โโโ __init__.py
-โ โโโ test_engine.py
-โ โโโ test_api.py
-โโโ models/
-โ โโโ model_store/
-โโโ Dockerfile
-โโโ requirements.txt
-โโโ main.py
\ No newline at end of file
diff --git a/framework/__init__.py b/framework/__init__.py
new file mode 100644
index 0000000..36d734e
--- /dev/null
+++ b/framework/__init__.py
@@ -0,0 +1,618 @@
+"""
+Main framework interface for PyTorch inference.
+
+This module provides the main entry point for the inference framework,
+combining all components into an easy-to-use API.
+"""
+
+import asyncio
+import logging
+from typing import Any, Dict, List, Optional, Union, Tuple
+from pathlib import Path
+import time
+from contextlib import asynccontextmanager
+
+from .core.config import InferenceConfig, ModelType, ConfigFactory
+from .core.base_model import BaseModel, get_model_manager
+from .core.inference_engine import InferenceEngine, create_inference_engine
+from .core.optimized_model import OptimizedModel, create_optimized_model
+from .adapters.model_adapters import load_model
+from .utils.monitoring import get_performance_monitor, get_metrics_collector
+
+logger = logging.getLogger(__name__)
+
+# Import optimizers with error handling
+try:
+ from .optimizers import (
+ TensorRTOptimizer, ONNXOptimizer, QuantizationOptimizer,
+ MemoryOptimizer, CUDAOptimizer, JITOptimizer,
+ convert_to_tensorrt, convert_to_onnx, quantize_model,
+ enable_cuda_optimizations, jit_compile_model
+ )
+except ImportError as e:
+ logger.warning(f"Some optimizers not available: {e}")
+ # Define dummy functions/classes for missing optimizers
+ TensorRTOptimizer = None
+ ONNXOptimizer = None
+ QuantizationOptimizer = None
+ MemoryOptimizer = None
+ CUDAOptimizer = None
+ JITOptimizer = None
+ convert_to_tensorrt = None
+ convert_to_onnx = None
+ quantize_model = None
+ enable_cuda_optimizations = None
+ jit_compile_model = None
+
+
+class TorchInferenceFramework:
+ """
+ Main framework class for PyTorch inference.
+
+ This class provides a high-level interface for loading models,
+ running inference, and managing the entire inference pipeline.
+ """
+
+ def __init__(self, config: Optional[InferenceConfig] = None):
+ """
+ Initialize the framework.
+
+ Args:
+ config: Inference configuration. If None, will use global config.
+ """
+ if config is None:
+ from .core.config import get_global_config
+ config = get_global_config()
+
+ self.config = config
+ self.model: Optional[BaseModel] = None
+ self.engine: Optional[InferenceEngine] = None
+ self._model_manager = get_model_manager() # Store the manager instance
+ self.performance_monitor = get_performance_monitor()
+ self.metrics_collector = get_metrics_collector()
+
+ # State tracking
+ self._initialized = False
+ self._engine_running = False
+
+ self.logger = logging.getLogger(f"{__name__}.TorchInferenceFramework")
+
+ # Configure logging
+ self._setup_logging()
+
+ self.logger.info("TorchInferenceFramework initialized")
+
+ @property
+ def model_manager(self):
+ """Backward compatibility property for model_manager."""
+ return self._model_manager
+
+ def _setup_logging(self):
+ """Setup logging configuration."""
+ log_level = getattr(self.config.performance, 'log_level', 'INFO')
+ logging.basicConfig(
+ level=getattr(logging, log_level.upper()),
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+
+ def load_model(self, model_path: Union[str, Path], model_name: Optional[str] = None) -> None:
+ """
+ Load a model from file or identifier.
+
+ Args:
+ model_path: Path to model file or model identifier (e.g., HuggingFace model name)
+ model_name: Optional name for the model (for model manager)
+ """
+ try:
+ self.logger.info(f"Loading model from: {model_path}")
+
+ # Load model using adapter factory
+ self.model = load_model(model_path, self.config)
+
+ # Register with model manager
+ if model_name is None:
+ model_name = Path(model_path).stem if isinstance(model_path, (str, Path)) else str(model_path)
+
+ self._model_manager.register_model(model_name, self.model)
+
+ # Create inference engine
+ self.engine = create_inference_engine(self.model, self.config)
+
+ self._initialized = True
+ self.logger.info(f"Successfully loaded model: {model_name}")
+
+ except Exception as e:
+ self.logger.error(f"Failed to load model: {e}")
+ raise
+
+ async def start_engine(self) -> None:
+ """Start the inference engine for async processing."""
+ if not self._initialized:
+ raise RuntimeError("Model not loaded. Call load_model() first.")
+
+ if not self.engine:
+ raise RuntimeError("Inference engine not initialized.")
+
+ await self.engine.start()
+ self._engine_running = True
+ self.logger.info("Inference engine started")
+
+ async def stop_engine(self) -> None:
+ """Stop the inference engine."""
+ if self.engine and self._engine_running:
+ await self.engine.stop()
+ self._engine_running = False
+ self.logger.info("Inference engine stopped")
+
+ def predict(self, inputs: Any, **kwargs) -> Any:
+ """
+ Run inference on inputs (synchronous).
+
+ Args:
+ inputs: Input data (image path, tensor, text, etc.)
+ **kwargs: Additional arguments passed to prediction
+
+ Returns:
+ Prediction results
+ """
+ if not self._initialized:
+ raise RuntimeError("Model not loaded. Call load_model() first.")
+
+ # Track performance
+ request_id = f"sync_{int(time.time() * 1000000)}"
+ self.performance_monitor.start_request(request_id)
+ try:
+ result = self.model.predict(inputs)
+ self.performance_monitor.end_request(request_id)
+ return result
+ except Exception as e:
+ self.performance_monitor.end_request(request_id)
+ raise
+
+ async def predict_async(self, inputs: Any, priority: int = 0,
+ timeout: Optional[float] = None, **kwargs) -> Any:
+ """
+ Run inference on inputs (asynchronous).
+
+ Args:
+ inputs: Input data
+ priority: Request priority (higher = processed first)
+ timeout: Timeout in seconds
+ **kwargs: Additional arguments
+
+ Returns:
+ Prediction results
+ """
+ if not self._initialized:
+ raise RuntimeError("Model not loaded. Call load_model() first.")
+
+ if not self._engine_running:
+ raise RuntimeError("Engine not running. Call start_engine() first.")
+
+ return await self.engine.predict(inputs, priority, timeout)
+
+ def predict_batch(self, inputs_list: List[Any], **kwargs) -> List[Any]:
+ """
+ Run batch inference (synchronous).
+
+ Args:
+ inputs_list: List of input data
+ **kwargs: Additional arguments
+
+ Returns:
+ List of prediction results
+ """
+ if not self._initialized:
+ raise RuntimeError("Model not loaded. Call load_model() first.")
+
+ # Use the model's predict_batch method if available
+ if hasattr(self.model, 'predict_batch'):
+ return self.model.predict_batch(inputs_list)
+
+ # Fallback to individual predictions
+ results = []
+ for i, inputs in enumerate(inputs_list):
+ request_id = f"batch_{int(time.time() * 1000000)}_{i}"
+ self.performance_monitor.start_request(request_id)
+ try:
+ result = self.model.predict(inputs)
+ results.append(result)
+ self.performance_monitor.end_request(request_id)
+ except Exception as e:
+ self.performance_monitor.end_request(request_id)
+ raise
+
+ return results
+
+ async def predict_batch_async(self, inputs_list: List[Any], priority: int = 0,
+ timeout: Optional[float] = None, **kwargs) -> List[Any]:
+ """
+ Run batch inference (asynchronous).
+
+ Args:
+ inputs_list: List of input data
+ priority: Request priority
+ timeout: Timeout in seconds
+ **kwargs: Additional arguments
+
+ Returns:
+ List of prediction results
+ """
+ if not self._initialized:
+ raise RuntimeError("Model not loaded. Call load_model() first.")
+
+ if not self._engine_running:
+ raise RuntimeError("Engine not running. Call start_engine() first.")
+
+ return await self.engine.predict_batch(inputs_list, priority, timeout)
+
+ def benchmark(self, inputs: Any, iterations: int = 100, warmup: int = 10) -> Dict[str, Any]:
+ """
+ Benchmark the model performance.
+
+ Args:
+ inputs: Sample input for benchmarking
+ iterations: Number of benchmark iterations
+ warmup: Number of warmup iterations
+
+ Returns:
+ Benchmark results
+ """
+ if not self._initialized:
+ raise RuntimeError("Model not loaded. Call load_model() first.")
+
+ self.logger.info(f"Running benchmark: {warmup} warmup + {iterations} iterations")
+
+ # Warmup
+ for _ in range(warmup):
+ _ = self.model.predict(inputs)
+
+ # Benchmark
+ times = []
+ for _ in range(iterations):
+ start_time = time.perf_counter()
+ _ = self.model.predict(inputs)
+ elapsed = time.perf_counter() - start_time
+ times.append(elapsed)
+
+ # Calculate statistics
+ import statistics
+ mean_time = statistics.mean(times)
+ median_time = statistics.median(times)
+ std_time = statistics.stdev(times) if len(times) > 1 else 0
+ min_time = min(times)
+ max_time = max(times)
+
+ results = {
+ "iterations": iterations,
+ "mean_time_ms": mean_time * 1000,
+ "median_time_ms": median_time * 1000,
+ "std_time_ms": std_time * 1000,
+ "min_time_ms": min_time * 1000,
+ "max_time_ms": max_time * 1000,
+ "throughput_fps": 1.0 / mean_time,
+ "device": str(self.model.device),
+ "model_info": self.model.model_info
+ }
+
+ self.logger.info(f"Benchmark complete: {results['throughput_fps']:.2f} FPS")
+ return results
+
+ def get_model_info(self) -> Dict[str, Any]:
+ """Get information about the loaded model."""
+ if not self._initialized:
+ return {"loaded": False}
+
+ return self.model.model_info
+
+ def get_engine_stats(self) -> Dict[str, Any]:
+ """Get inference engine statistics."""
+ if not self.engine:
+ return {"engine": "not_initialized"}
+
+ return self.engine.get_stats()
+
+ def get_performance_report(self) -> Dict[str, Any]:
+ """Get comprehensive performance report."""
+ report = {
+ "framework_info": {
+ "initialized": self._initialized,
+ "engine_running": self._engine_running,
+ "config": self.config
+ },
+ "model_info": self.get_model_info(),
+ "performance_metrics": self.performance_monitor.get_performance_summary()
+ }
+
+ if self.engine:
+ report["engine_stats"] = self.engine.get_stats()
+ report["engine_performance"] = self.engine.get_performance_report()
+
+ return report
+
+ async def health_check(self) -> Dict[str, Any]:
+ """Perform health check on the framework."""
+ health = {
+ "healthy": True,
+ "checks": {},
+ "timestamp": time.time()
+ }
+
+ # Check framework initialization
+ health["checks"]["framework_initialized"] = self._initialized
+ if not self._initialized:
+ health["healthy"] = False
+
+ # Check model
+ if self.model:
+ health["checks"]["model_loaded"] = self.model.is_loaded
+ if not self.model.is_loaded:
+ health["healthy"] = False
+ else:
+ health["checks"]["model_loaded"] = False
+ health["healthy"] = False
+
+ # Check engine
+ if self.engine:
+ engine_health = await self.engine.health_check()
+ health["checks"]["engine"] = engine_health
+ if not engine_health["healthy"]:
+ health["healthy"] = False
+ else:
+ health["checks"]["engine"] = {"healthy": False, "reason": "not_initialized"}
+ # Engine not being initialized is okay for sync-only usage
+
+ return health
+
+ async def cleanup_async(self) -> None:
+ """Cleanup all resources (async version)."""
+ self.logger.info("Cleaning up framework resources")
+
+ if self.engine and self._engine_running:
+ await self.stop_engine()
+
+ if self.model:
+ self.model.cleanup()
+
+ self._model_manager.cleanup_all()
+
+ self.logger.info("Framework cleanup complete")
+
+ def cleanup_sync(self) -> None:
+ """Synchronous cleanup for backward compatibility."""
+ self.logger.info("Cleaning up framework resources (sync)")
+
+ if self.engine and self._engine_running:
+ # For sync cleanup, we can't await, so just stop without awaiting
+ self._engine_running = False
+
+ if self.model:
+ self.model.cleanup()
+
+ self._model_manager.cleanup_all()
+
+ self.logger.info("Framework cleanup complete (sync)")
+
+ def cleanup(self) -> None:
+ """Backward compatible cleanup method."""
+ return self.cleanup_sync()
+
+ @asynccontextmanager
+ async def async_context(self):
+ """Async context manager for automatic lifecycle management."""
+ try:
+ if self.engine and not self._engine_running:
+ await self.start_engine()
+ yield self
+ finally:
+ await self.cleanup_async()
+
+ def __enter__(self):
+ """Sync context manager entry."""
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ """Sync context manager exit."""
+ if self.model:
+ self.model.cleanup()
+
+
+# Factory functions for common use cases
+
+def create_classification_framework(model_path: Union[str, Path],
+ num_classes: int,
+ class_names: Optional[List[str]] = None,
+ input_size: Tuple[int, int] = (224, 224)) -> TorchInferenceFramework:
+ """
+ Create a framework configured for image classification.
+
+ Args:
+ model_path: Path to model file
+ num_classes: Number of classification classes
+ class_names: Optional list of class names
+ input_size: Input image size
+
+ Returns:
+ Configured framework instance
+ """
+ config = ConfigFactory.create_classification_config(
+ num_classes=num_classes,
+ input_size=input_size,
+ use_softmax=True
+ )
+
+ framework = TorchInferenceFramework(config)
+ framework.load_model(model_path)
+
+ return framework
+
+
+def create_detection_framework(model_path: Union[str, Path],
+ class_names: Optional[List[str]] = None,
+ input_size: Tuple[int, int] = (640, 640),
+ confidence_threshold: float = 0.5) -> TorchInferenceFramework:
+ """
+ Create a framework configured for object detection.
+
+ Args:
+ model_path: Path to model file
+ class_names: Optional list of class names
+ input_size: Input image size
+ confidence_threshold: Detection confidence threshold
+
+ Returns:
+ Configured framework instance
+ """
+ config = ConfigFactory.create_detection_config(
+ input_size=input_size,
+ confidence_threshold=confidence_threshold
+ )
+
+ framework = TorchInferenceFramework(config)
+ framework.load_model(model_path)
+
+ return framework
+
+
+def create_segmentation_framework(model_path: Union[str, Path],
+ input_size: Tuple[int, int] = (640, 640),
+ threshold: float = 0.5) -> TorchInferenceFramework:
+ """
+ Create a framework configured for image segmentation.
+
+ Args:
+ model_path: Path to model file
+ input_size: Input image size
+ threshold: Segmentation threshold
+
+ Returns:
+ Configured framework instance
+ """
+ config = ConfigFactory.create_segmentation_config(
+ input_size=input_size,
+ threshold=threshold
+ )
+
+ framework = TorchInferenceFramework(config)
+ framework.load_model(model_path)
+
+ return framework
+
+
+# Convenience functions for quick usage
+
+def predict_image_classification(model_path: Union[str, Path],
+ image_path: Union[str, Path],
+ num_classes: int,
+ class_names: Optional[List[str]] = None) -> Dict[str, Any]:
+ """
+ Quick image classification prediction.
+
+ Args:
+ model_path: Path to model file
+ image_path: Path to image file
+ num_classes: Number of classes
+ class_names: Optional class names
+
+ Returns:
+ Classification result
+ """
+ framework = create_classification_framework(model_path, num_classes, class_names)
+ with framework:
+ result = framework.predict(image_path)
+ return result
+
+
+def predict_object_detection(model_path: Union[str, Path],
+ image_path: Union[str, Path],
+ class_names: Optional[List[str]] = None,
+ confidence_threshold: float = 0.5) -> Dict[str, Any]:
+ """
+ Quick object detection prediction.
+
+ Args:
+ model_path: Path to model file
+ image_path: Path to image file
+ class_names: Optional class names
+ confidence_threshold: Detection threshold
+
+ Returns:
+ Detection result
+ """
+ framework = create_detection_framework(
+ model_path, class_names, confidence_threshold=confidence_threshold
+ )
+ with framework:
+ result = framework.predict(image_path)
+ return result
+
+
+def predict_segmentation(model_path: Union[str, Path],
+ image_path: Union[str, Path],
+ threshold: float = 0.5) -> Dict[str, Any]:
+ """
+ Quick segmentation prediction.
+
+ Args:
+ model_path: Path to model file
+ image_path: Path to image file
+ threshold: Segmentation threshold
+
+ Returns:
+ Segmentation result
+ """
+ framework = create_segmentation_framework(model_path, threshold=threshold)
+ with framework:
+ result = framework.predict(image_path)
+ return result
+
+
+# Global framework instance for singleton usage
+_global_framework: Optional[TorchInferenceFramework] = None
+
+
+def get_global_framework() -> TorchInferenceFramework:
+ """Get the global framework instance."""
+ global _global_framework
+ if _global_framework is None:
+ _global_framework = TorchInferenceFramework()
+ return _global_framework
+
+
+def set_global_framework(framework: TorchInferenceFramework) -> None:
+ """Set the global framework instance."""
+ global _global_framework
+ _global_framework = framework
+
+
+# Convenience functions for optimization
+def create_optimized_framework(config: Optional[InferenceConfig] = None) -> TorchInferenceFramework:
+ """
+ Create an optimized framework with automatic optimization selection.
+
+ Args:
+ config: Inference configuration
+
+ Returns:
+ Optimized framework instance
+ """
+ class OptimizedFramework(TorchInferenceFramework):
+ def load_model(self, model_path: Union[str, Path], model_name: Optional[str] = None) -> None:
+ """Load model with automatic optimization."""
+ # Use optimized model instead of regular model
+ self.model = OptimizedModel(self.config)
+ self.model.load_model(model_path)
+
+ # Register with model manager
+ if model_name is None:
+ model_name = Path(model_path).stem if isinstance(model_path, (str, Path)) else str(model_path)
+
+ self._model_manager.register_model(model_name, self.model)
+
+ # Create inference engine
+ self.engine = create_inference_engine(self.model, self.config)
+
+ self._initialized = True
+ self.logger.info(f"Successfully loaded optimized model: {model_name}")
+
+ return OptimizedFramework(config)
diff --git a/framework/adapters/model_adapters.py b/framework/adapters/model_adapters.py
new file mode 100644
index 0000000..b66c302
--- /dev/null
+++ b/framework/adapters/model_adapters.py
@@ -0,0 +1,602 @@
+"""
+Model adapters for different deep learning frameworks.
+
+This module provides adapters to load and use models from different frameworks
+(PyTorch, ONNX, TensorRT, etc.) with a unified interface.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional, Union, Tuple
+from pathlib import Path
+import logging
+import torch
+import torch.nn as nn
+import numpy as np
+
+from ..core.base_model import BaseModel, ModelMetadata, ModelLoadError
+from ..core.config import InferenceConfig
+from ..processors.preprocessor import PreprocessingResult
+
+
+logger = logging.getLogger(__name__)
+
+
+class PyTorchModelAdapter(BaseModel):
+ """Adapter for PyTorch models."""
+
+ def __init__(self, config: InferenceConfig):
+ super().__init__(config)
+ self.model_path: Optional[Path] = None
+
+ def load_model(self, model_path: Union[str, Path]) -> None:
+ """Load PyTorch model."""
+ try:
+ model_path = Path(model_path)
+ self.model_path = model_path
+
+ self.logger.info(f"Loading PyTorch model from {model_path}")
+
+ # Load model
+ if model_path.suffix == '.pt' or model_path.suffix == '.pth':
+ checkpoint = torch.load(model_path, map_location=self.device, weights_only=False)
+
+ # Handle different save formats
+ if isinstance(checkpoint, nn.Module):
+ self.model = checkpoint
+ elif isinstance(checkpoint, dict):
+ if 'model' in checkpoint:
+ self.model = checkpoint['model']
+ elif 'state_dict' in checkpoint:
+ # Need model architecture for state_dict
+ raise ModelLoadError("State dict found but no model architecture provided")
+ else:
+ # Assume the dict is the state dict
+ raise ModelLoadError("Model architecture required for state dict")
+ else:
+ raise ModelLoadError(f"Unsupported checkpoint format: {type(checkpoint)}")
+
+ elif model_path.suffix == '.torchscript':
+ self.model = torch.jit.load(model_path, map_location=self.device)
+
+ else:
+ raise ModelLoadError(f"Unsupported file extension: {model_path.suffix}")
+
+ # Set metadata
+ self.metadata = ModelMetadata(
+ name=model_path.stem,
+ version="1.0",
+ model_type="pytorch",
+ input_shape=self._get_input_shape(),
+ output_shape=self._get_output_shape(),
+ description=f"PyTorch model loaded from {model_path}"
+ )
+
+ self._is_loaded = True
+ self.logger.info(f"Successfully loaded PyTorch model: {self.metadata.name}")
+
+ except Exception as e:
+ self.logger.error(f"Failed to load PyTorch model: {e}")
+ raise ModelLoadError(f"Failed to load PyTorch model: {e}") from e
+
+ def preprocess(self, inputs: Any) -> torch.Tensor:
+ """Preprocess inputs for PyTorch model."""
+ # Use the preprocessing pipeline
+ from ..processors.preprocessor import create_default_preprocessing_pipeline
+
+ if not hasattr(self, '_preprocessing_pipeline'):
+ self._preprocessing_pipeline = create_default_preprocessing_pipeline(self.config)
+
+ result = self._preprocessing_pipeline.preprocess(inputs)
+ return result.data
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ """Run forward pass through PyTorch model."""
+ if not self._is_loaded:
+ raise ModelLoadError("Model not loaded")
+
+ model = self.get_model_for_inference()
+
+ # Ensure inputs are on the correct device
+ inputs = inputs.to(self.device)
+
+ # Handle different model types
+ if hasattr(model, 'predict') and callable(model.predict):
+ # YOLO-style model
+ outputs = model.predict(inputs)
+ elif isinstance(model, torch.jit.ScriptModule):
+ # TorchScript model
+ outputs = model(inputs)
+ else:
+ # Standard PyTorch model
+ outputs = model(inputs)
+
+ return outputs
+
+ def postprocess(self, outputs: torch.Tensor) -> Any:
+ """Postprocess PyTorch model outputs."""
+ # Use the postprocessing pipeline
+ from ..processors.postprocessor import create_default_postprocessing_pipeline
+
+ if not hasattr(self, '_postprocessing_pipeline'):
+ self._postprocessing_pipeline = create_default_postprocessing_pipeline(self.config)
+
+ result = self._postprocessing_pipeline.auto_postprocess(outputs)
+
+ # Convert to dict for backward compatibility
+ if hasattr(result, 'to_dict'):
+ return result.to_dict()
+
+ return result
+
+ def predict_batch(self, inputs_list: List[Any]) -> List[Any]:
+ """
+ Batch prediction optimized for PyTorch models.
+
+ Args:
+ inputs_list: List of input data
+
+ Returns:
+ List of predictions
+ """
+ if not inputs_list:
+ return []
+
+ # Try to batch process if possible
+ try:
+ # Preprocess all inputs
+ preprocessed_inputs = [self.preprocess(inp) for inp in inputs_list]
+
+ # Stack into batch tensor if possible
+ if all(isinstance(inp, torch.Tensor) and inp.shape == preprocessed_inputs[0].shape for inp in preprocessed_inputs):
+ # Check if inputs already have batch dimension of 1 - if so, remove it before stacking
+ if len(preprocessed_inputs[0].shape) == 4 and preprocessed_inputs[0].shape[0] == 1:
+ # Remove the batch dimension from each input before stacking
+ squeezed_inputs = [inp.squeeze(0) for inp in preprocessed_inputs]
+ batch_tensor = torch.stack(squeezed_inputs, dim=0)
+ else:
+ batch_tensor = torch.stack(preprocessed_inputs, dim=0)
+
+ # Forward pass on batch
+ with torch.no_grad():
+ batch_outputs = self.forward(batch_tensor)
+
+ # Split batch results and postprocess
+ if len(batch_outputs.shape) > 0:
+ outputs_list = torch.split(batch_outputs, 1, dim=0)
+ results = []
+ for output in outputs_list:
+ output = output.squeeze(0) # Remove batch dimension
+ result = self.postprocess(output)
+ results.append(result)
+ return results
+
+ # Fallback to individual processing
+ return [self.predict(inp) for inp in inputs_list]
+
+ except Exception as e:
+ self.logger.warning(f"Batch processing failed: {e}, falling back to individual processing")
+ # Fallback to individual processing
+ return [self.predict(inp) for inp in inputs_list]
+
+ def _get_input_shape(self) -> Tuple[int, ...]:
+ """Get model input shape."""
+ try:
+ if hasattr(self.model, 'input_shape'):
+ return tuple(self.model.input_shape)
+ elif hasattr(self.config, 'preprocessing') and hasattr(self.config.preprocessing, 'input_size'):
+ h, w = self.config.preprocessing.input_size
+ return (3, h, w)
+ else:
+ return (3, 224, 224) # Default
+ except Exception:
+ return (3, 224, 224) # Default
+
+ def _get_output_shape(self) -> Tuple[int, ...]:
+ """Get model output shape."""
+ try:
+ if hasattr(self.model, 'output_shape'):
+ return tuple(self.model.output_shape)
+ else:
+ return (1000,) # Default for classification
+ except Exception:
+ return (1000,) # Default
+
+
+class ONNXModelAdapter(BaseModel):
+ """Adapter for ONNX models."""
+
+ def __init__(self, config: InferenceConfig):
+ super().__init__(config)
+ self.model_path: Optional[Path] = None
+ self.session = None
+ self.input_names = []
+ self.output_names = []
+
+ def load_model(self, model_path: Union[str, Path]) -> None:
+ """Load ONNX model."""
+ try:
+ import onnxruntime as ort
+ except ImportError:
+ raise ModelLoadError("onnxruntime not installed. Install with: pip install onnxruntime")
+
+ try:
+ model_path = Path(model_path)
+ self.model_path = model_path
+
+ self.logger.info(f"Loading ONNX model from {model_path}")
+
+ # Configure ONNX Runtime providers
+ providers = ['CPUExecutionProvider']
+ if self.device.type == 'cuda':
+ providers.insert(0, 'CUDAExecutionProvider')
+
+ # Create inference session
+ self.session = ort.InferenceSession(str(model_path), providers=providers)
+
+ # Get input and output names
+ self.input_names = [input.name for input in self.session.get_inputs()]
+ self.output_names = [output.name for output in self.session.get_outputs()]
+
+ # Get input and output shapes
+ input_shapes = [input.shape for input in self.session.get_inputs()]
+ output_shapes = [output.shape for output in self.session.get_outputs()]
+
+ # Set metadata
+ self.metadata = ModelMetadata(
+ name=model_path.stem,
+ version="1.0",
+ model_type="onnx",
+ input_shape=tuple(input_shapes[0]) if input_shapes else (1, 3, 224, 224),
+ output_shape=tuple(output_shapes[0]) if output_shapes else (1, 1000),
+ description=f"ONNX model loaded from {model_path}"
+ )
+
+ self._is_loaded = True
+ self.logger.info(f"Successfully loaded ONNX model: {self.metadata.name}")
+
+ except Exception as e:
+ self.logger.error(f"Failed to load ONNX model: {e}")
+ raise ModelLoadError(f"Failed to load ONNX model: {e}") from e
+
+ def preprocess(self, inputs: Any) -> torch.Tensor:
+ """Preprocess inputs for ONNX model."""
+ # Use the preprocessing pipeline
+ from ..processors.preprocessor import create_default_preprocessing_pipeline
+
+ if not hasattr(self, '_preprocessing_pipeline'):
+ self._preprocessing_pipeline = create_default_preprocessing_pipeline(self.config)
+
+ result = self._preprocessing_pipeline.preprocess(inputs)
+ return result.data
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ """Run forward pass through ONNX model."""
+ if not self._is_loaded or not self.session:
+ raise ModelLoadError("Model not loaded")
+
+ # Convert to numpy for ONNX Runtime
+ inputs_np = inputs.detach().cpu().numpy()
+
+ # Prepare input dict
+ input_dict = {self.input_names[0]: inputs_np}
+
+ # Run inference
+ outputs = self.session.run(self.output_names, input_dict)
+
+ # Convert back to torch tensor
+ output_tensor = torch.from_numpy(outputs[0]).to(self.device)
+
+ return output_tensor
+
+ def postprocess(self, outputs: torch.Tensor) -> Any:
+ """Postprocess ONNX model outputs."""
+ # Use the postprocessing pipeline
+ from ..processors.postprocessor import create_default_postprocessing_pipeline
+
+ if not hasattr(self, '_postprocessing_pipeline'):
+ self._postprocessing_pipeline = create_default_postprocessing_pipeline(self.config)
+
+ result = self._postprocessing_pipeline.auto_postprocess(outputs)
+ return result
+
+
+class TensorRTModelAdapter(BaseModel):
+ """Adapter for TensorRT models."""
+
+ def __init__(self, config: InferenceConfig):
+ super().__init__(config)
+ self.model_path: Optional[Path] = None
+ self.engine = None
+ self.context = None
+ self.input_names = []
+ self.output_names = []
+ self.bindings = []
+
+ def load_model(self, model_path: Union[str, Path]) -> None:
+ """Load TensorRT model."""
+ try:
+ import tensorrt as trt
+ import pycuda.driver as cuda
+ import pycuda.autoinit
+ except ImportError:
+ raise ModelLoadError("TensorRT or PyCUDA not installed")
+
+ if self.device.type != 'cuda':
+ raise ModelLoadError("TensorRT requires CUDA device")
+
+ try:
+ model_path = Path(model_path)
+ self.model_path = model_path
+
+ self.logger.info(f"Loading TensorRT model from {model_path}")
+
+ # Load TensorRT engine
+ with open(model_path, 'rb') as f:
+ engine_data = f.read()
+
+ # Create runtime and deserialize engine
+ runtime = trt.Runtime(trt.Logger(trt.Logger.WARNING))
+ self.engine = runtime.deserialize_cuda_engine(engine_data)
+ self.context = self.engine.create_execution_context()
+
+ # Get input and output information
+ for i in range(self.engine.num_bindings):
+ name = self.engine.get_binding_name(i)
+ shape = self.engine.get_binding_shape(i)
+
+ if self.engine.binding_is_input(i):
+ self.input_names.append(name)
+ else:
+ self.output_names.append(name)
+
+ # Set metadata
+ self.metadata = ModelMetadata(
+ name=model_path.stem,
+ version="1.0",
+ model_type="tensorrt",
+ input_shape=tuple(self.engine.get_binding_shape(0)),
+ output_shape=tuple(self.engine.get_binding_shape(1)) if self.engine.num_bindings > 1 else (1000,),
+ description=f"TensorRT model loaded from {model_path}"
+ )
+
+ self._is_loaded = True
+ self.logger.info(f"Successfully loaded TensorRT model: {self.metadata.name}")
+
+ except Exception as e:
+ self.logger.error(f"Failed to load TensorRT model: {e}")
+ raise ModelLoadError(f"Failed to load TensorRT model: {e}") from e
+
+ def preprocess(self, inputs: Any) -> torch.Tensor:
+ """Preprocess inputs for TensorRT model."""
+ # Use the preprocessing pipeline
+ from ..processors.preprocessor import create_default_preprocessing_pipeline
+
+ if not hasattr(self, '_preprocessing_pipeline'):
+ self._preprocessing_pipeline = create_default_preprocessing_pipeline(self.config)
+
+ result = self._preprocessing_pipeline.preprocess(inputs)
+ return result.data
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ """Run forward pass through TensorRT model."""
+ if not self._is_loaded or not self.engine or not self.context:
+ raise ModelLoadError("Model not loaded")
+
+ import pycuda.driver as cuda
+
+ # Ensure inputs are contiguous and on GPU
+ inputs = inputs.contiguous()
+
+ # Allocate GPU memory for outputs
+ output_shape = self.engine.get_binding_shape(1)
+ outputs = torch.empty(output_shape, dtype=inputs.dtype, device=self.device)
+
+ # Set up bindings
+ bindings = [inputs.data_ptr(), outputs.data_ptr()]
+
+ # Run inference
+ self.context.execute_v2(bindings)
+
+ return outputs
+
+ def postprocess(self, outputs: torch.Tensor) -> Any:
+ """Postprocess TensorRT model outputs."""
+ # Use the postprocessing pipeline
+ from ..processors.postprocessor import create_default_postprocessing_pipeline
+
+ if not hasattr(self, '_postprocessing_pipeline'):
+ self._postprocessing_pipeline = create_default_postprocessing_pipeline(self.config)
+
+ result = self._postprocessing_pipeline.auto_postprocess(outputs)
+ return result
+
+
+class HuggingFaceModelAdapter(BaseModel):
+ """Adapter for Hugging Face transformers models."""
+
+ def __init__(self, config: InferenceConfig):
+ super().__init__(config)
+ self.model_name: Optional[str] = None
+ self.tokenizer = None
+
+ def load_model(self, model_path: Union[str, Path]) -> None:
+ """Load Hugging Face model."""
+ try:
+ from transformers import AutoModel, AutoTokenizer, AutoConfig
+ except ImportError:
+ raise ModelLoadError("transformers not installed. Install with: pip install transformers")
+
+ try:
+ # Handle both local path and model name
+ if isinstance(model_path, Path) and model_path.exists():
+ model_name = str(model_path)
+ else:
+ model_name = str(model_path)
+
+ self.model_name = model_name
+
+ self.logger.info(f"Loading Hugging Face model: {model_name}")
+
+ # Load config to get model info
+ config = AutoConfig.from_pretrained(model_name)
+
+ # Load model and tokenizer
+ self.model = AutoModel.from_pretrained(model_name)
+ self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Move to device
+ self.model.to(self.device)
+
+ # Set metadata
+ self.metadata = ModelMetadata(
+ name=model_name.split('/')[-1] if '/' in model_name else model_name,
+ version="1.0",
+ model_type="huggingface",
+ input_shape=(512,), # Default sequence length
+ output_shape=(config.hidden_size,) if hasattr(config, 'hidden_size') else (768,),
+ description=f"Hugging Face model: {model_name}"
+ )
+
+ self._is_loaded = True
+ self.logger.info(f"Successfully loaded Hugging Face model: {self.metadata.name}")
+
+ except Exception as e:
+ self.logger.error(f"Failed to load Hugging Face model: {e}")
+ raise ModelLoadError(f"Failed to load Hugging Face model: {e}") from e
+
+ def preprocess(self, inputs: Any) -> torch.Tensor:
+ """Preprocess text inputs for Hugging Face model."""
+ if not self.tokenizer:
+ raise ModelLoadError("Tokenizer not loaded")
+
+ if isinstance(inputs, str):
+ text = inputs
+ elif isinstance(inputs, list):
+ text = inputs # Assume list of strings
+ else:
+ text = str(inputs)
+
+ # Tokenize
+ encoded = self.tokenizer(
+ text,
+ padding=True,
+ truncation=True,
+ max_length=512,
+ return_tensors="pt"
+ )
+
+ # Move to device
+ for key in encoded:
+ encoded[key] = encoded[key].to(self.device)
+
+ return encoded
+
+ def forward(self, inputs: Union[torch.Tensor, Dict[str, torch.Tensor]]) -> torch.Tensor:
+ """Run forward pass through Hugging Face model."""
+ if not self._is_loaded:
+ raise ModelLoadError("Model not loaded")
+
+ model = self.get_model_for_inference()
+
+ if isinstance(inputs, dict):
+ # Tokenized inputs
+ outputs = model(**inputs)
+ else:
+ # Raw tensor
+ outputs = model(inputs)
+
+ # Extract appropriate outputs
+ if hasattr(outputs, 'last_hidden_state'):
+ return outputs.last_hidden_state
+ elif hasattr(outputs, 'pooler_output'):
+ return outputs.pooler_output
+ else:
+ return outputs[0] if isinstance(outputs, tuple) else outputs
+
+ def postprocess(self, outputs: torch.Tensor) -> Any:
+ """Postprocess Hugging Face model outputs."""
+ # For transformers, typically return embeddings or logits directly
+ return {
+ "embeddings": outputs.detach().cpu().numpy(),
+ "shape": list(outputs.shape)
+ }
+
+
+class ModelAdapterFactory:
+ """Factory for creating model adapters."""
+
+ @staticmethod
+ def create_adapter(model_path: Union[str, Path], config: InferenceConfig) -> BaseModel:
+ """Create appropriate model adapter based on file extension or model type."""
+ model_path = Path(model_path) if isinstance(model_path, str) else model_path
+
+ # Determine adapter type based on file extension or model name
+ if model_path.suffix in ['.pt', '.pth', '.torchscript']:
+ return PyTorchModelAdapter(config)
+ elif model_path.suffix == '.onnx':
+ return ONNXModelAdapter(config)
+ elif model_path.suffix in ['.trt', '.engine']:
+ return TensorRTModelAdapter(config)
+ elif ('/' in str(model_path) and not model_path.exists()) or \
+ (not model_path.exists() and not model_path.suffix and '-' in str(model_path)):
+ # Likely a Hugging Face model name (contains '/' or has no extension with '-')
+ return HuggingFaceModelAdapter(config)
+ else:
+ # Default to PyTorch
+ return PyTorchModelAdapter(config)
+
+ @staticmethod
+ def get_supported_formats() -> List[str]:
+ """Get list of supported model formats."""
+ return [
+ '.pt', '.pth', '.torchscript', # PyTorch
+ '.onnx', # ONNX
+ '.trt', '.engine', # TensorRT
+ 'huggingface' # Hugging Face
+ ]
+
+
+def load_model(model_path: Union[str, Path], config: Optional[InferenceConfig] = None) -> BaseModel:
+ """
+ Convenient function to load any supported model.
+
+ Args:
+ model_path: Path to model file or model identifier
+ config: Inference configuration
+
+ Returns:
+ Loaded model adapter
+
+ Raises:
+ ValueError: If model format is not supported
+ """
+ if config is None:
+ from ..core.config import get_global_config
+ config = get_global_config()
+
+ model_path = Path(model_path) if isinstance(model_path, str) else model_path
+
+ # Validate model format before proceeding
+ if model_path.exists() and model_path.suffix not in ['.pt', '.pth', '.torchscript', '.onnx', '.trt', '.engine']:
+ raise ValueError(f"Unsupported model format: {model_path.suffix}")
+
+ # Create adapter
+ try:
+ adapter = ModelAdapterFactory.create_adapter(model_path, config)
+
+ # Load model
+ adapter.load_model(model_path)
+
+ # Optimize for inference
+ adapter.optimize_for_inference()
+
+ # Warmup
+ adapter.warmup()
+
+ return adapter
+ except ModelLoadError as e:
+ # Convert ModelLoadError to ValueError for unsupported formats
+ if "Unsupported file extension" in str(e):
+ raise ValueError(f"Unsupported model format: {model_path}") from e
+ else:
+ raise
diff --git a/framework/core/base_model.py b/framework/core/base_model.py
new file mode 100644
index 0000000..a69dd8d
--- /dev/null
+++ b/framework/core/base_model.py
@@ -0,0 +1,440 @@
+"""
+Base model interface and abstract classes for the PyTorch inference framework.
+
+This module defines the core interfaces that all model implementations must follow,
+ensuring consistency and interoperability across different model types.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional, Union, Tuple
+import torch
+import torch.nn as nn
+from pathlib import Path
+import logging
+from dataclasses import dataclass
+from enum import Enum
+
+from ..core.config import InferenceConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ModelMetadata:
+ """Metadata about a model."""
+ name: str
+ version: str
+ model_type: str
+ input_shape: Tuple[int, ...]
+ output_shape: Tuple[int, ...]
+ description: Optional[str] = None
+ author: Optional[str] = None
+ license: Optional[str] = None
+ tags: List[str] = None
+
+ def __post_init__(self):
+ if self.tags is None:
+ self.tags = []
+
+
+class ModelLoadError(Exception):
+ """Exception raised when model loading fails."""
+ pass
+
+
+class ModelInferenceError(Exception):
+ """Exception raised when model inference fails."""
+ pass
+
+
+class BaseModel(ABC):
+ """
+ Abstract base class for all model implementations.
+
+ This class defines the interface that all model implementations must follow,
+ ensuring consistency and proper resource management.
+ """
+
+ def __init__(self, config: InferenceConfig):
+ self.config = config
+ self.device = config.device.get_torch_device()
+ self.model: Optional[nn.Module] = None
+ self.metadata: Optional[ModelMetadata] = None
+ self._is_loaded = False
+ self._compiled_model = None
+
+ # Setup logging
+ self.logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
+
+ @abstractmethod
+ def load_model(self, model_path: Union[str, Path]) -> None:
+ """
+ Load the model from the given path.
+
+ Args:
+ model_path: Path to the model file
+
+ Raises:
+ ModelLoadError: If model loading fails
+ """
+ pass
+
+ @abstractmethod
+ def preprocess(self, inputs: Any) -> torch.Tensor:
+ """
+ Preprocess inputs before inference.
+
+ Args:
+ inputs: Raw inputs (images, text, etc.)
+
+ Returns:
+ Preprocessed tensor ready for model inference
+ """
+ pass
+
+ @abstractmethod
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ """
+ Run forward pass through the model.
+
+ Args:
+ inputs: Preprocessed input tensor
+
+ Returns:
+ Raw model outputs
+ """
+ pass
+
+ @abstractmethod
+ def postprocess(self, outputs: torch.Tensor) -> Any:
+ """
+ Postprocess model outputs.
+
+ Args:
+ outputs: Raw model outputs
+
+ Returns:
+ Processed outputs in the desired format
+ """
+ pass
+
+ def predict(self, inputs: Any) -> Any:
+ """
+ Complete prediction pipeline: preprocess -> forward -> postprocess.
+
+ Args:
+ inputs: Raw inputs
+
+ Returns:
+ Final predictions
+ """
+ if not self._is_loaded:
+ raise ModelInferenceError("Model not loaded. Call load_model() first.")
+
+ try:
+ # Preprocess
+ preprocessed_inputs = self.preprocess(inputs)
+
+ # Forward pass
+ with torch.no_grad():
+ try:
+ raw_outputs = self.forward(preprocessed_inputs)
+ except Exception as e:
+ # Handle compilation errors by falling back to non-compiled model
+ if "CppCompileError" in str(e) and self._compiled_model is not None:
+ self.logger.warning("Torch compilation failed, falling back to non-compiled model")
+ self.config.device.use_torch_compile = False
+ self._compiled_model = None
+ raw_outputs = self.forward(preprocessed_inputs)
+ else:
+ raise
+
+ # Postprocess
+ predictions = self.postprocess(raw_outputs)
+
+ # Convert to dict for backward compatibility if needed
+ if hasattr(predictions, 'to_dict'):
+ return predictions.to_dict()
+
+ return predictions
+
+ except Exception as e:
+ self.logger.error(f"Prediction failed: {e}")
+ raise ModelInferenceError(f"Prediction failed: {e}") from e
+
+ def predict_batch(self, inputs: List[Any]) -> List[Any]:
+ """
+ Batch prediction with automatic batching.
+
+ Args:
+ inputs: List of raw inputs
+
+ Returns:
+ List of predictions
+ """
+ if not inputs:
+ return []
+
+ batch_size = self.config.batch.batch_size
+ results = []
+
+ for i in range(0, len(inputs), batch_size):
+ batch = inputs[i:i + batch_size]
+
+ if len(batch) == 1:
+ # Single item
+ result = self.predict(batch[0])
+ results.append(result)
+ else:
+ # True batch processing
+ batch_result = self.predict_batch_internal(batch)
+ results.extend(batch_result)
+
+ return results
+
+ def predict_batch_internal(self, inputs: List[Any]) -> List[Any]:
+ """
+ Internal batch processing method. Override for true batch processing.
+
+ Args:
+ inputs: List of raw inputs (batch)
+
+ Returns:
+ List of predictions
+ """
+ # Default implementation: process individually
+ return [self.predict(inp) for inp in inputs]
+
+ def warmup(self, num_iterations: int = None) -> None:
+ """
+ Warmup the model with dummy inputs.
+
+ Args:
+ num_iterations: Number of warmup iterations
+ """
+ if num_iterations is None:
+ num_iterations = self.config.performance.warmup_iterations
+
+ if not self._is_loaded:
+ self.logger.warning("Model not loaded, skipping warmup")
+ return
+
+ self.logger.info(f"Warming up model with {num_iterations} iterations")
+
+ try:
+ # Create dummy input based on preprocessing config
+ dummy_input = self._create_dummy_input()
+
+ for i in range(num_iterations):
+ try:
+ with torch.no_grad():
+ _ = self.forward(dummy_input)
+ except Exception as e:
+ self.logger.warning(f"Warmup iteration {i+1} failed: {e}")
+ # If first iteration fails due to compilation, disable compilation and retry
+ if i == 0 and "CppCompileError" in str(e):
+ self.logger.warning("Disabling torch.compile due to compilation error")
+ self.config.device.use_torch_compile = False
+ self._compiled_model = None
+ try:
+ with torch.no_grad():
+ _ = self.forward(dummy_input)
+ except Exception as e2:
+ self.logger.error(f"Warmup failed even without compilation: {e2}")
+ break
+ else:
+ # For other errors, just continue
+ continue
+
+ self.logger.info("Model warmup completed")
+
+ except Exception as e:
+ self.logger.warning(f"Warmup failed: {e}. Model may still work for inference.")
+
+ def compile_model(self) -> None:
+ """Compile the model using torch.compile for optimization."""
+ if not self._is_loaded or not self.config.device.use_torch_compile:
+ return
+
+ if not hasattr(torch, 'compile'):
+ self.logger.warning("torch.compile not available, skipping compilation")
+ return
+
+ try:
+ self.logger.info("Compiling model with torch.compile")
+ self._compiled_model = torch.compile(
+ self.model,
+ mode=self.config.device.compile_mode,
+ fullgraph=False
+ )
+ self.logger.info("Model compilation completed")
+ except Exception as e:
+ self.logger.warning(f"Model compilation failed: {e}. Continuing without compilation.")
+ # Don't raise the exception, just continue without compilation
+
+ def get_model_for_inference(self) -> nn.Module:
+ """Get the model instance to use for inference (compiled or original)."""
+ return self._compiled_model if self._compiled_model is not None else self.model
+
+ def optimize_for_inference(self) -> None:
+ """Apply various optimizations for inference."""
+ if not self._is_loaded:
+ return
+
+ # Set to evaluation mode
+ self.model.eval()
+
+ # Move to target device
+ self.model.to(self.device)
+
+ # Apply FP16 if requested
+ if self.config.device.use_fp16:
+ self.model.half()
+
+ # Compile model if requested
+ self.compile_model()
+
+ # Configure CUDA optimizations
+ if self.device.type == 'cuda':
+ torch.backends.cudnn.benchmark = True
+ torch.backends.cudnn.deterministic = False
+
+ def _create_dummy_input(self) -> torch.Tensor:
+ """Create dummy input for warmup. Override in subclasses."""
+ # Default implementation for image models
+ if hasattr(self.config, 'preprocessing') and hasattr(self.config.preprocessing, 'input_size'):
+ height, width = self.config.preprocessing.input_size
+ return torch.randn(1, 3, height, width, device=self.device)
+ else:
+ # Generic dummy input
+ return torch.randn(1, 10, device=self.device)
+
+ def get_memory_usage(self) -> Dict[str, float]:
+ """Get current memory usage information."""
+ memory_info = {}
+
+ if self.device.type == 'cuda':
+ memory_info['gpu_allocated_mb'] = torch.cuda.memory_allocated(self.device) / (1024 ** 2)
+ memory_info['gpu_reserved_mb'] = torch.cuda.memory_reserved(self.device) / (1024 ** 2)
+ memory_info['gpu_max_allocated_mb'] = torch.cuda.max_memory_allocated(self.device) / (1024 ** 2)
+
+ # CPU memory would require psutil
+ try:
+ import psutil
+ process = psutil.Process()
+ memory_info['cpu_memory_mb'] = process.memory_info().rss / (1024 ** 2)
+ except ImportError:
+ pass
+
+ return memory_info
+
+ def cleanup(self) -> None:
+ """Cleanup resources."""
+ if self.device.type == 'cuda':
+ torch.cuda.empty_cache()
+
+ @property
+ def is_loaded(self) -> bool:
+ """Check if model is loaded."""
+ return self._is_loaded
+
+ @property
+ def model_info(self) -> Dict[str, Any]:
+ """Get model information."""
+ info = {
+ "loaded": self._is_loaded,
+ "device": str(self.device),
+ "config": self.config,
+ }
+
+ if self.metadata:
+ # Convert metadata to dict for compatibility
+ if hasattr(self.metadata, '__dict__'):
+ info["metadata"] = self.metadata.__dict__.copy()
+ else:
+ info["metadata"] = {
+ "model_type": getattr(self.metadata, 'model_type', 'pytorch'),
+ "input_shape": getattr(self.metadata, 'input_shape', None),
+ "output_shape": getattr(self.metadata, 'output_shape', None),
+ "num_parameters": getattr(self.metadata, 'num_parameters', None),
+ "framework_version": getattr(self.metadata, 'framework_version', None)
+ }
+
+ if self._is_loaded:
+ info["memory_usage"] = self.get_memory_usage()
+
+ # Model parameters count
+ if self.model:
+ try:
+ # Handle both real models and Mock objects
+ if hasattr(self.model, 'parameters') and callable(self.model.parameters):
+ total_params = sum(p.numel() for p in self.model.parameters())
+ trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
+ info["total_parameters"] = total_params
+ info["trainable_parameters"] = trainable_params
+ except (TypeError, AttributeError):
+ # Skip parameter counting for Mock objects or other types
+ pass
+
+ return info
+
+
+class ModelManager:
+ """
+ Manager class for handling multiple model instances and lifecycle.
+ """
+
+ def __init__(self):
+ self._models: Dict[str, BaseModel] = {}
+ self.logger = logging.getLogger(f"{__name__}.ModelManager")
+
+ def register_model(self, name: str, model: BaseModel) -> None:
+ """Register a model instance."""
+ if name in self._models:
+ self.logger.warning(f"Model '{name}' already exists, replacing")
+
+ self._models[name] = model
+ self.logger.info(f"Registered model '{name}'")
+
+ def get_model(self, name: str) -> BaseModel:
+ """Get a registered model."""
+ if name not in self._models:
+ raise KeyError(f"Model '{name}' not found")
+
+ return self._models[name]
+
+ def list_models(self) -> List[str]:
+ """List all registered models."""
+ return list(self._models.keys())
+
+ def load_model(self, name: str, model_path: Union[str, Path]) -> None:
+ """Load a registered model."""
+ model = self.get_model(name)
+ model.load_model(model_path)
+ model.optimize_for_inference()
+ model.warmup()
+
+ def unload_model(self, name: str) -> None:
+ """Unload and cleanup a model."""
+ if name in self._models:
+ self._models[name].cleanup()
+ del self._models[name]
+ self.logger.info(f"Unloaded model '{name}'")
+
+ def cleanup_all(self) -> None:
+ """Cleanup all models."""
+ for name in list(self._models.keys()):
+ self.unload_model(name)
+
+
+# Global model manager instance
+_global_model_manager: Optional[ModelManager] = None
+
+
+def get_model_manager() -> ModelManager:
+ """Get the global model manager."""
+ global _global_model_manager
+ if _global_model_manager is None:
+ _global_model_manager = ModelManager()
+ return _global_model_manager
diff --git a/framework/core/config.py b/framework/core/config.py
new file mode 100644
index 0000000..75bd474
--- /dev/null
+++ b/framework/core/config.py
@@ -0,0 +1,392 @@
+"""
+Production-level configuration system for PyTorch inference framework.
+
+This module provides a centralized, type-safe configuration system with support for
+environment variables, validation, and different model types.
+"""
+
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Any, Union, Tuple
+from enum import Enum
+from pathlib import Path
+import os
+import torch
+from abc import ABC, abstractmethod
+
+
+class ModelType(Enum):
+ """Supported model types."""
+ CLASSIFICATION = "classification"
+ SEGMENTATION = "segmentation"
+ DETECTION = "detection"
+ REGRESSION = "regression"
+ CUSTOM = "custom"
+
+
+class DeviceType(Enum):
+ """Supported device types."""
+ CPU = "cpu"
+ CUDA = "cuda"
+ MPS = "mps" # Apple Silicon
+ AUTO = "auto"
+
+ @classmethod
+ def from_string(cls, value: str) -> "DeviceType":
+ """Create DeviceType from string value."""
+ if not value:
+ return cls.AUTO
+
+ value = value.lower()
+ for device_type in cls:
+ if device_type.value == value:
+ return device_type
+
+ # For explicitly invalid device types, raise an error
+ valid_values = [dt.value for dt in cls]
+ raise ValueError(f"Invalid device type: '{value}'. Must be one of: {valid_values}")
+
+
+class OptimizationLevel(Enum):
+ """Optimization levels for inference."""
+ NONE = "none"
+ BASIC = "basic"
+ AGGRESSIVE = "aggressive"
+
+
+@dataclass
+class DeviceConfig:
+ """Device and hardware configuration."""
+ device_type: DeviceType = DeviceType.AUTO
+ device_id: Optional[int] = None
+ use_fp16: bool = False
+ use_int8: bool = False
+ use_tensorrt: bool = False
+ use_torch_compile: bool = False # Disabled by default to avoid C++ compilation issues
+ compile_mode: str = "reduce-overhead"
+
+ def __post_init__(self):
+ """Validate device configuration after initialization."""
+ if isinstance(self.device_type, str):
+ # Convert string to DeviceType enum if possible
+ try:
+ self.device_type = DeviceType(self.device_type)
+ except ValueError:
+ # Check if it's a valid device string that torch would accept
+ valid_devices = ['cpu', 'cuda', 'mps']
+ if self.device_type not in valid_devices:
+ raise ValueError(f"Invalid device type: {self.device_type}. Must be one of {valid_devices} or a DeviceType enum value.")
+
+ def get_torch_device(self) -> torch.device:
+ """Get the actual torch device."""
+ if self.device_type == DeviceType.AUTO:
+ if torch.cuda.is_available():
+ device_str = "cuda"
+ if self.device_id is not None:
+ device_str = f"cuda:{self.device_id}"
+ elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+ device_str = "mps"
+ else:
+ device_str = "cpu"
+ else:
+ # Handle both DeviceType enum and string values
+ if isinstance(self.device_type, str):
+ device_str = self.device_type
+ else:
+ device_str = self.device_type.value
+ if (isinstance(self.device_type, DeviceType) and self.device_type == DeviceType.CUDA) or \
+ (isinstance(self.device_type, str) and self.device_type == "cuda"):
+ if self.device_id is not None:
+ device_str = f"cuda:{self.device_id}"
+
+ return torch.device(device_str)
+
+
+@dataclass
+class BatchConfig:
+ """Batch processing configuration."""
+ batch_size: int = 1
+ min_batch_size: int = 1
+ max_batch_size: int = 16
+ adaptive_batching: bool = True
+ timeout_seconds: float = 30.0
+ queue_size: int = 100
+
+ def __post_init__(self):
+ """Validate batch configuration after initialization."""
+ if self.batch_size > self.max_batch_size:
+ raise ValueError(f"batch_size ({self.batch_size}) cannot be greater than max_batch_size ({self.max_batch_size})")
+ if self.min_batch_size > self.batch_size:
+ raise ValueError(f"min_batch_size ({self.min_batch_size}) cannot be greater than batch_size ({self.batch_size})")
+ if self.min_batch_size < 1:
+ raise ValueError("min_batch_size must be at least 1")
+
+
+@dataclass
+class PreprocessingConfig:
+ """Preprocessing configuration."""
+ input_size: Tuple[int, int] = (224, 224)
+ mean: List[float] = field(default_factory=lambda: [0.485, 0.456, 0.406])
+ std: List[float] = field(default_factory=lambda: [0.229, 0.224, 0.225])
+ interpolation: str = "bilinear"
+ center_crop: bool = True
+ normalize: bool = True
+ to_rgb: bool = True
+
+
+@dataclass
+class PostprocessingConfig:
+ """Postprocessing configuration."""
+ threshold: float = 0.5
+ nms_threshold: float = 0.5
+ max_detections: int = 100
+ apply_sigmoid: bool = False
+ apply_softmax: bool = False
+
+
+@dataclass
+class PerformanceConfig:
+ """Performance and monitoring configuration."""
+ enable_profiling: bool = False
+ enable_metrics: bool = True
+ warmup_iterations: int = 3
+ benchmark_iterations: int = 10
+ log_level: str = "INFO"
+ enable_async: bool = True
+ max_workers: int = 4
+ max_concurrent_requests: int = 10
+
+
+@dataclass
+class CacheConfig:
+ """Caching configuration."""
+ enable_caching: bool = True
+ cache_size: int = 100
+ cache_ttl_seconds: int = 3600
+ disk_cache_path: Optional[Path] = None
+
+
+@dataclass
+class SecurityConfig:
+ """Security and safety configuration."""
+ max_file_size_mb: int = 100
+ allowed_extensions: List[str] = field(default_factory=lambda: [".jpg", ".jpeg", ".png", ".bmp"])
+ validate_inputs: bool = True
+ sanitize_outputs: bool = True
+
+
+@dataclass
+class InferenceConfig:
+ """Main inference configuration."""
+ model_type: ModelType = ModelType.CUSTOM
+ device: DeviceConfig = field(default_factory=lambda: DeviceConfig(device_type=DeviceType.AUTO))
+ batch: BatchConfig = field(default_factory=BatchConfig)
+ preprocessing: PreprocessingConfig = field(default_factory=PreprocessingConfig)
+ postprocessing: PostprocessingConfig = field(default_factory=PostprocessingConfig)
+ performance: PerformanceConfig = field(default_factory=PerformanceConfig)
+ cache: CacheConfig = field(default_factory=CacheConfig)
+ security: SecurityConfig = field(default_factory=SecurityConfig)
+
+ # Custom parameters for specific model types
+ custom_params: Dict[str, Any] = field(default_factory=dict)
+
+ # Property accessors for common configuration values
+ @property
+ def num_classes(self) -> Optional[int]:
+ """Get number of classes from custom params."""
+ return self.custom_params.get("num_classes")
+
+ @property
+ def input_size(self) -> Optional[Tuple[int, int]]:
+ """Get input size from preprocessing config."""
+ return self.preprocessing.input_size
+
+ @property
+ def threshold(self) -> float:
+ """Get threshold from postprocessing config."""
+ return self.postprocessing.threshold
+
+ @property
+ def optimizations(self) -> Dict[str, Any]:
+ """Get optimization settings as a dictionary."""
+ return {
+ "tensorrt": self.device.use_tensorrt,
+ "fp16": self.device.use_fp16,
+ "torch_compile": self.device.use_torch_compile,
+ "adaptive_batching": self.batch.adaptive_batching,
+ "profiling": self.performance.enable_profiling
+ }
+
+ @classmethod
+ def from_env(cls) -> "InferenceConfig":
+ """Create config from environment variables."""
+ config = cls()
+
+ # Device configuration
+ if os.getenv("DEVICE"):
+ config.device.device_type = DeviceType(os.getenv("DEVICE", "auto"))
+ if os.getenv("DEVICE_ID"):
+ config.device.device_id = int(os.getenv("DEVICE_ID"))
+ if os.getenv("USE_FP16"):
+ config.device.use_fp16 = os.getenv("USE_FP16", "false").lower() == "true"
+
+ # Batch configuration
+ if os.getenv("BATCH_SIZE"):
+ config.batch.batch_size = int(os.getenv("BATCH_SIZE"))
+ if os.getenv("MAX_BATCH_SIZE"):
+ config.batch.max_batch_size = int(os.getenv("MAX_BATCH_SIZE"))
+
+ # Performance configuration
+ if os.getenv("MAX_WORKERS"):
+ config.performance.max_workers = int(os.getenv("MAX_WORKERS"))
+ if os.getenv("LOG_LEVEL"):
+ config.performance.log_level = os.getenv("LOG_LEVEL")
+
+ return config
+
+ @classmethod
+ def from_dict(cls, config_dict: Dict[str, Any]) -> "InferenceConfig":
+ """Create config from dictionary."""
+ # This would implement recursive dataclass creation from dict
+ # For brevity, simplified implementation
+ config = cls()
+
+ if "device" in config_dict:
+ device_dict = config_dict["device"]
+ if "device_type" in device_dict:
+ config.device.device_type = DeviceType(device_dict["device_type"])
+ # ... other fields
+
+ return config
+
+ def validate(self) -> bool:
+ """Validate configuration."""
+ # Validate device configuration
+ if self.device.device_type == DeviceType.CUDA and not torch.cuda.is_available():
+ raise ValueError("CUDA requested but not available")
+
+ # Validate batch sizes
+ if self.batch.min_batch_size > self.batch.max_batch_size:
+ raise ValueError("min_batch_size cannot be greater than max_batch_size")
+
+ if self.batch.batch_size < self.batch.min_batch_size:
+ self.batch.batch_size = self.batch.min_batch_size
+ elif self.batch.batch_size > self.batch.max_batch_size:
+ self.batch.batch_size = self.batch.max_batch_size
+
+ # Validate preprocessing
+ if len(self.preprocessing.mean) != 3 or len(self.preprocessing.std) != 3:
+ raise ValueError("Mean and std must have exactly 3 values (RGB)")
+
+ # Validate thresholds
+ if not 0 <= self.postprocessing.threshold <= 1:
+ raise ValueError("Threshold must be between 0 and 1")
+
+ return True
+
+
+# Factory for creating model-specific configurations
+class ConfigFactory:
+ """Factory for creating model-specific configurations."""
+
+ @staticmethod
+ def create_classification_config(
+ num_classes: int,
+ input_size: Tuple[int, int] = (224, 224),
+ use_softmax: bool = True
+ ) -> InferenceConfig:
+ """Create configuration for image classification."""
+ config = InferenceConfig()
+ config.model_type = ModelType.CLASSIFICATION
+ config.preprocessing.input_size = input_size
+ config.postprocessing.apply_softmax = use_softmax
+ config.custom_params = {"num_classes": num_classes}
+ return config
+
+ @staticmethod
+ def create_segmentation_config(
+ input_size: Tuple[int, int] = (640, 640),
+ threshold: float = 0.5,
+ min_contour_area: int = 100
+ ) -> InferenceConfig:
+ """Create configuration for image segmentation."""
+ config = InferenceConfig()
+ config.model_type = ModelType.SEGMENTATION
+ config.preprocessing.input_size = input_size
+ config.postprocessing.threshold = threshold
+ config.custom_params = {
+ "min_contour_area": min_contour_area,
+ "max_contours": 100
+ }
+ return config
+
+ @staticmethod
+ def create_detection_config(
+ input_size: Tuple[int, int] = (640, 640),
+ confidence_threshold: float = 0.5,
+ nms_threshold: float = 0.5,
+ max_detections: int = 100
+ ) -> InferenceConfig:
+ """Create configuration for object detection."""
+ config = InferenceConfig()
+ config.model_type = ModelType.DETECTION
+ config.preprocessing.input_size = input_size
+ config.postprocessing.threshold = confidence_threshold
+ config.postprocessing.nms_threshold = nms_threshold
+ config.postprocessing.max_detections = max_detections
+ return config
+
+ @staticmethod
+ def create_optimized_config(
+ enable_tensorrt: bool = False,
+ enable_fp16: bool = False,
+ enable_torch_compile: bool = False,
+ enable_cuda: bool = None
+ ) -> InferenceConfig:
+ """Create configuration optimized for performance."""
+ config = InferenceConfig()
+
+ # Enable performance optimizations
+ config.device.use_fp16 = enable_fp16
+ config.device.use_tensorrt = enable_tensorrt
+ config.device.use_torch_compile = enable_torch_compile
+
+ # Auto-detect CUDA if not specified
+ if enable_cuda is None:
+ try:
+ import torch
+ enable_cuda = torch.cuda.is_available()
+ except ImportError:
+ enable_cuda = False
+
+ if enable_cuda:
+ config.device.device_type = "cuda"
+
+ # Optimize batch settings
+ config.batch.adaptive_batching = True
+ config.batch.max_batch_size = 32
+
+ # Enable performance monitoring
+ config.performance.enable_profiling = True
+ config.performance.enable_metrics = True
+
+ return config
+
+
+# Global configuration instance
+_global_config: Optional[InferenceConfig] = None
+
+
+def get_global_config() -> InferenceConfig:
+ """Get the global configuration instance."""
+ global _global_config
+ if _global_config is None:
+ _global_config = InferenceConfig.from_env()
+ _global_config.validate()
+ return _global_config
+
+
+def set_global_config(config: InferenceConfig) -> None:
+ """Set the global configuration instance."""
+ global _global_config
+ config.validate()
+ _global_config = config
diff --git a/framework/core/config_manager.py b/framework/core/config_manager.py
new file mode 100644
index 0000000..23bf7c7
--- /dev/null
+++ b/framework/core/config_manager.py
@@ -0,0 +1,437 @@
+"""
+Configuration Manager for PyTorch Inference Framework
+
+This module provides unified configuration management that can read from:
+- Environment variables (.env files)
+- YAML configuration files
+- Environment-specific overrides
+
+Configuration precedence (highest to lowest):
+1. Environment variables
+2. config.yaml environment-specific overrides
+3. config.yaml base configuration
+4. Default values
+"""
+
+import os
+import yaml
+from pathlib import Path
+from typing import Any, Dict, Optional, Union, TYPE_CHECKING
+from dotenv import load_dotenv
+import logging
+
+from .config import (
+ InferenceConfig, DeviceConfig, BatchConfig, PreprocessingConfig,
+ PostprocessingConfig, PerformanceConfig, CacheConfig, SecurityConfig,
+ DeviceType, ModelType
+)
+
+if TYPE_CHECKING:
+ from ..enterprise.config import EnterpriseConfig
+
+try:
+ from ..enterprise.config import EnterpriseConfig
+ ENTERPRISE_AVAILABLE = True
+except ImportError:
+ ENTERPRISE_AVAILABLE = False
+ EnterpriseConfig = None
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigManager:
+ """Unified configuration manager for the inference framework."""
+
+ def __init__(self,
+ env_file: Optional[Union[str, Path]] = None,
+ config_file: Optional[Union[str, Path]] = None,
+ config_dir: Optional[Union[str, Path]] = None,
+ environment: str = "development"):
+ """
+ Initialize the configuration manager.
+
+ Args:
+ env_file: Path to .env file (defaults to .env in project root or config_dir)
+ config_file: Path to config.yaml file (defaults to config.yaml in project root or config_dir)
+ config_dir: Base directory for config files
+ environment: Current environment (development, staging, production)
+ """
+ self.environment = environment
+
+ # Determine base directory
+ if config_dir:
+ base_dir = Path(config_dir)
+ else:
+ base_dir = Path(__file__).parent.parent.parent
+
+ # Determine file paths
+ self.env_file = Path(env_file) if env_file else base_dir / ".env"
+ self.config_file = Path(config_file) if config_file else base_dir / "config.yaml"
+
+ # Load configurations
+ self._env_config = self._load_env_config()
+ self._yaml_config = self._load_yaml_config()
+
+ logger.info(f"Configuration loaded for environment: {self.environment}")
+
+ def _load_env_config(self) -> Dict[str, Any]:
+ """Load environment variables from .env file."""
+ if self.env_file.exists():
+ load_dotenv(self.env_file, override=True)
+ logger.debug(f"Loaded environment configuration from {self.env_file}")
+ else:
+ logger.warning(f"Environment file not found: {self.env_file}")
+
+ # Return environment variables as dict
+ return dict(os.environ)
+
+ def _load_yaml_config(self) -> Dict[str, Any]:
+ """Load YAML configuration file."""
+ if not self.config_file.exists():
+ logger.warning(f"YAML config file not found: {self.config_file}")
+ return {}
+
+ try:
+ with open(self.config_file, 'r', encoding='utf-8') as f:
+ config = yaml.safe_load(f) or {}
+
+ # Apply environment-specific overrides
+ if 'environments' in config and self.environment in config['environments']:
+ env_overrides = config['environments'][self.environment]
+ config = self._deep_merge(config, env_overrides)
+ # Remove the environments key to avoid confusion
+ if 'environments' in config:
+ del config['environments']
+
+ logger.debug(f"Loaded YAML configuration from {self.config_file}")
+ return config
+
+ except Exception as e:
+ logger.error(f"Failed to load YAML config: {e}")
+ return {}
+
+ def _deep_merge(self, base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
+ """Deep merge two dictionaries, with override taking precedence."""
+ result = base.copy()
+
+ for key, value in override.items():
+ if key in result and isinstance(result[key], dict) and isinstance(value, dict):
+ result[key] = self._deep_merge(result[key], value)
+ else:
+ result[key] = value
+
+ return result
+
+ def get(self, key: str, default: Any = None, config_path: Optional[str] = None) -> Any:
+ """
+ Get configuration value with precedence: env > yaml > default.
+
+ Args:
+ key: Configuration key (for env vars)
+ default: Default value if not found
+ config_path: Dot-separated path for YAML config (e.g., 'device.type')
+
+ Returns:
+ Configuration value
+ """
+ # Check environment variables first
+ env_value = self._env_config.get(key)
+ if env_value is not None:
+ return self._convert_type(env_value, default)
+
+ # Check YAML config
+ if config_path:
+ yaml_value = self._get_nested_value(self._yaml_config, config_path)
+ if yaml_value is not None:
+ return yaml_value
+
+ return default
+
+ def _get_nested_value(self, config: Dict[str, Any], path: str) -> Any:
+ """Get nested value from config using dot-separated path."""
+ keys = path.split('.')
+ value = config
+
+ try:
+ for key in keys:
+ value = value[key]
+ return value
+ except (KeyError, TypeError):
+ return None
+
+ def _convert_type(self, value: str, reference: Any) -> Any:
+ """Convert string environment variable to appropriate type."""
+ if reference is None:
+ return value
+
+ if isinstance(reference, bool):
+ return value.lower() in ('true', '1', 'yes', 'on')
+ elif isinstance(reference, int):
+ try:
+ return int(value)
+ except ValueError:
+ return reference
+ elif isinstance(reference, float):
+ try:
+ return float(value)
+ except ValueError:
+ return reference
+ elif isinstance(reference, list):
+ # Handle comma-separated lists
+ if isinstance(value, str):
+ return [item.strip() for item in value.split(',') if item.strip()]
+ return value
+
+ return value
+
+ def get_server_config(self) -> Dict[str, Any]:
+ """Get server configuration."""
+ return {
+ 'host': self.get('HOST', '0.0.0.0', 'server.host'),
+ 'port': self.get('PORT', 8000, 'server.port'),
+ 'reload': self.get('RELOAD', False, 'server.reload'),
+ 'log_level': self.get('LOG_LEVEL', 'INFO', 'server.log_level'),
+ 'workers': self.get('WORKERS', 1, 'server.workers')
+ }
+
+ def get_inference_config(self) -> InferenceConfig:
+ """Create InferenceConfig from loaded configuration."""
+ # Reload environment to catch any changes made via patch.dict
+ current_env = dict(os.environ)
+
+ # Device configuration
+ device_type = 'auto' # Default
+ if 'DEVICE' in current_env:
+ device_type = current_env['DEVICE'].lower()
+ elif 'device' in self._yaml_config:
+ # Support both 'device_type' and 'type' keys for flexibility
+ if 'device_type' in self._yaml_config['device']:
+ device_type = str(self._yaml_config['device']['device_type']).lower()
+ elif 'type' in self._yaml_config['device']:
+ device_type = str(self._yaml_config['device']['type']).lower()
+
+ device_config = DeviceConfig(
+ device_type=DeviceType.from_string(device_type),
+ device_id=current_env.get('DEVICE_ID') and int(current_env.get('DEVICE_ID')),
+ use_fp16=current_env.get('USE_FP16', 'false').lower() == 'true' if 'USE_FP16' in current_env else self._yaml_config.get('device', {}).get('use_fp16', False),
+ use_torch_compile=current_env.get('USE_TORCH_COMPILE', 'false').lower() == 'true' if 'USE_TORCH_COMPILE' in current_env else self._yaml_config.get('device', {}).get('use_torch_compile', False)
+ )
+
+ # Batch configuration - handle environment variables properly
+ batch_size = 2 # Default
+ if 'BATCH_SIZE' in current_env:
+ try:
+ batch_size = int(current_env['BATCH_SIZE'])
+ except (ValueError, TypeError):
+ pass
+ elif self._yaml_config.get('batch', {}).get('batch_size'):
+ batch_size = self._yaml_config['batch']['batch_size']
+
+ min_batch_size = 1
+ if 'MIN_BATCH_SIZE' in current_env:
+ try:
+ min_batch_size = int(current_env['MIN_BATCH_SIZE'])
+ except (ValueError, TypeError):
+ pass
+ elif self._yaml_config.get('batch', {}).get('min_batch_size'):
+ min_batch_size = self._yaml_config['batch']['min_batch_size']
+
+ max_batch_size = 16
+ if 'MAX_BATCH_SIZE' in current_env:
+ try:
+ max_batch_size = int(current_env['MAX_BATCH_SIZE'])
+ except (ValueError, TypeError):
+ pass
+ elif self._yaml_config.get('batch', {}).get('max_batch_size'):
+ max_batch_size = self._yaml_config['batch']['max_batch_size']
+
+ # Ensure batch_size doesn't exceed max_batch_size
+ if batch_size > max_batch_size:
+ max_batch_size = max(batch_size, 16) # Expand max_batch_size if needed
+
+ batch_config = BatchConfig(
+ batch_size=batch_size,
+ min_batch_size=min_batch_size,
+ max_batch_size=max_batch_size,
+ adaptive_batching=self.get('ADAPTIVE_BATCHING', True, 'batch.adaptive_batching'),
+ timeout_seconds=self.get('BATCH_TIMEOUT', 5.0, 'batch.timeout_seconds'),
+ queue_size=self.get('QUEUE_SIZE', 100, 'batch.queue_size')
+ )
+
+ # Preprocessing configuration
+ input_size = (
+ self.get('INPUT_SIZE_WIDTH', 224, 'preprocessing.input_size.width'),
+ self.get('INPUT_SIZE_HEIGHT', 224, 'preprocessing.input_size.height')
+ )
+ mean = [
+ self.get('MEAN_R', 0.485, 'preprocessing.normalization.mean.0'),
+ self.get('MEAN_G', 0.456, 'preprocessing.normalization.mean.1'),
+ self.get('MEAN_B', 0.406, 'preprocessing.normalization.mean.2')
+ ]
+ std = [
+ self.get('STD_R', 0.229, 'preprocessing.normalization.std.0'),
+ self.get('STD_G', 0.224, 'preprocessing.normalization.std.1'),
+ self.get('STD_B', 0.225, 'preprocessing.normalization.std.2')
+ ]
+
+ # Handle YAML list format
+ yaml_mean = self._get_nested_value(self._yaml_config, 'preprocessing.normalization.mean')
+ if yaml_mean and isinstance(yaml_mean, list):
+ mean = yaml_mean
+
+ yaml_std = self._get_nested_value(self._yaml_config, 'preprocessing.normalization.std')
+ if yaml_std and isinstance(yaml_std, list):
+ std = yaml_std
+
+ preprocessing_config = PreprocessingConfig(
+ input_size=input_size,
+ mean=mean,
+ std=std,
+ interpolation=self.get('INTERPOLATION', 'bilinear', 'preprocessing.interpolation'),
+ center_crop=self.get('CENTER_CROP', True, 'preprocessing.center_crop'),
+ normalize=self.get('NORMALIZE', True, 'preprocessing.normalize'),
+ to_rgb=self.get('TO_RGB', True, 'preprocessing.to_rgb')
+ )
+
+ # Postprocessing configuration
+ postprocessing_config = PostprocessingConfig(
+ threshold=self.get('THRESHOLD', 0.5, 'postprocessing.threshold'),
+ nms_threshold=self.get('NMS_THRESHOLD', 0.5, 'postprocessing.nms_threshold'),
+ max_detections=self.get('MAX_DETECTIONS', 100, 'postprocessing.max_detections'),
+ apply_sigmoid=self.get('APPLY_SIGMOID', False, 'postprocessing.apply_sigmoid'),
+ apply_softmax=self.get('APPLY_SOFTMAX', False, 'postprocessing.apply_softmax')
+ )
+
+ # Performance configuration
+ performance_config = PerformanceConfig(
+ enable_profiling=self.get('ENABLE_PROFILING', False, 'performance.enable_profiling'),
+ enable_metrics=self.get('ENABLE_METRICS', True, 'performance.enable_metrics'),
+ warmup_iterations=self.get('WARMUP_ITERATIONS', 3, 'performance.warmup_iterations'),
+ benchmark_iterations=self.get('BENCHMARK_ITERATIONS', 10, 'performance.benchmark_iterations'),
+ log_level=self.get('LOG_LEVEL', 'INFO', 'monitoring.logging.level'),
+ enable_async=self.get('ENABLE_ASYNC', True, 'performance.enable_async'),
+ max_workers=self.get('MAX_WORKERS', 4, 'performance.max_workers')
+ )
+
+ # Cache configuration
+ cache_config = CacheConfig(
+ enable_caching=self.get('ENABLE_CACHING', True, 'cache.enable_caching'),
+ cache_size=self.get('CACHE_SIZE', 100, 'cache.cache_size'),
+ cache_ttl_seconds=self.get('CACHE_TTL_SECONDS', 3600, 'cache.cache_ttl_seconds'),
+ disk_cache_path=self.get('DISK_CACHE_PATH', None, 'cache.disk_cache_path')
+ )
+
+ # Security configuration
+ allowed_extensions = self.get('ALLOWED_EXTENSIONS', ['.jpg', '.jpeg', '.png', '.bmp'], 'security.allowed_extensions')
+ if isinstance(allowed_extensions, str):
+ allowed_extensions = [ext.strip() for ext in allowed_extensions.split(',')]
+
+ security_config = SecurityConfig(
+ max_file_size_mb=self.get('MAX_FILE_SIZE_MB', 100, 'security.max_file_size_mb'),
+ allowed_extensions=allowed_extensions,
+ validate_inputs=self.get('VALIDATE_INPUTS', True, 'security.validate_inputs'),
+ sanitize_outputs=self.get('SANITIZE_OUTPUTS', True, 'security.sanitize_outputs')
+ )
+
+ # Model type (default to CUSTOM)
+ model_type = ModelType.CUSTOM
+
+ return InferenceConfig(
+ model_type=model_type,
+ device=device_config,
+ batch=batch_config,
+ preprocessing=preprocessing_config,
+ postprocessing=postprocessing_config,
+ performance=performance_config,
+ cache=cache_config,
+ security=security_config
+ )
+
+ def get_enterprise_config(self) -> Optional[Any]:
+ """Get enterprise configuration if enterprise features are enabled."""
+ enterprise_enabled = self.get('ENTERPRISE_ENABLED', False, 'enterprise.enabled')
+
+ if not enterprise_enabled or not ENTERPRISE_AVAILABLE:
+ return None
+
+ try:
+ # Create base inference config
+ inference_config = self.get_inference_config()
+
+ # Create enterprise config with inference config
+ enterprise_config = EnterpriseConfig(inference=inference_config)
+
+ # Set basic properties
+ enterprise_config.environment = self.get('ENVIRONMENT', 'development', 'enterprise.environment')
+ enterprise_config.tenant_id = self.get('TENANT_ID', None, 'enterprise.tenant_id')
+ enterprise_config.deployment_id = self.get('DEPLOYMENT_ID', '', 'enterprise.deployment_id')
+ enterprise_config.version = self.get('VERSION', '1.0.0', 'app.version')
+
+ # Auth configuration
+ enterprise_config.auth.secret_key = self.get('JWT_SECRET_KEY', '', 'enterprise.auth.secret_key')
+ enterprise_config.auth.oauth2_client_id = self.get('OAUTH2_CLIENT_ID', '', 'enterprise.auth.oauth2_client_id')
+ enterprise_config.auth.oauth2_client_secret = self.get('OAUTH2_CLIENT_SECRET', '', 'enterprise.auth.oauth2_client_secret')
+
+ # Security configuration
+ enterprise_config.security.enable_encryption_at_rest = self.get('ENABLE_ENCRYPTION_AT_REST', False, 'enterprise.security.enable_encryption_at_rest')
+ enterprise_config.security.rate_limit_requests_per_minute = self.get('RATE_LIMIT_RPM', 100, 'enterprise.security.rate_limit_requests_per_minute')
+ enterprise_config.security.enable_audit_logging = self.get('ENABLE_AUDIT_LOGGING', False, 'enterprise.security.enable_audit_logging')
+
+ # Monitoring configuration
+ enterprise_config.monitoring.jaeger_endpoint = self.get('JAEGER_ENDPOINT', '', 'monitoring.tracing.jaeger_endpoint')
+ enterprise_config.monitoring.metrics_port = self.get('METRICS_PORT', 9090, 'monitoring.metrics.port')
+ enterprise_config.monitoring.log_level = self.get('LOG_LEVEL', 'INFO', 'monitoring.logging.level')
+
+ # Integration configuration
+ enterprise_config.integration.database_url = self.get('DATABASE_URL', '', 'enterprise.integration.database_url')
+ enterprise_config.integration.cache_url = self.get('CACHE_URL', 'redis://localhost:6379/0', 'enterprise.integration.cache_url')
+ enterprise_config.integration.message_broker_url = self.get('MESSAGE_BROKER_URL', '', 'enterprise.integration.message_broker_url')
+
+ return enterprise_config
+
+ except ImportError:
+ logger.warning("Enterprise features not available")
+ return None
+ except Exception as e:
+ logger.error(f"Failed to create enterprise config: {e}")
+ return None
+
+ def reload_config(self):
+ """Reload configuration from files."""
+ self._env_config = self._load_env_config()
+ self._yaml_config = self._load_yaml_config()
+ logger.info("Configuration reloaded")
+
+ def export_config(self) -> Dict[str, Any]:
+ """Export current configuration for debugging/logging."""
+ return {
+ 'environment': self.environment,
+ 'env_file': str(self.env_file),
+ 'config_file': str(self.config_file),
+ 'server': self.get_server_config(),
+ 'enterprise_enabled': self.get('ENTERPRISE_ENABLED', False, 'enterprise.enabled')
+ }
+
+
+# Global configuration manager instance
+_config_manager: Optional[ConfigManager] = None
+
+
+def get_config_manager(environment: str = None) -> ConfigManager:
+ """Get the global configuration manager instance."""
+ global _config_manager
+
+ if _config_manager is None or (environment and _config_manager.environment != environment):
+ # Determine environment from various sources
+ if not environment:
+ environment = os.getenv('ENVIRONMENT', 'development')
+
+ _config_manager = ConfigManager(environment=environment)
+
+ return _config_manager
+
+
+def set_config_manager(config_manager: ConfigManager):
+ """Set the global configuration manager instance."""
+ global _config_manager
+ _config_manager = config_manager
diff --git a/framework/core/inference_engine.py b/framework/core/inference_engine.py
new file mode 100644
index 0000000..f61e070
--- /dev/null
+++ b/framework/core/inference_engine.py
@@ -0,0 +1,543 @@
+"""
+Advanced inference engine with optimized batching, async support, and monitoring.
+
+This module provides a production-ready inference engine with features like:
+- Dynamic batch sizing with PID control
+- Asynchronous processing
+- Performance monitoring
+- Memory management
+- Error handling and recovery
+"""
+
+import asyncio
+import time
+import logging
+from typing import Any, Dict, List, Optional, Union, Callable, Tuple
+from dataclasses import dataclass
+from collections import deque
+import threading
+from concurrent.futures import ThreadPoolExecutor
+import torch
+from contextlib import asynccontextmanager
+
+from ..core.base_model import BaseModel
+from ..core.config import InferenceConfig
+from ..utils.monitoring import PerformanceMonitor, MetricsCollector
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class InferenceRequest:
+ """Individual inference request."""
+ id: str
+ inputs: Any
+ future: asyncio.Future
+ timestamp: float
+ priority: int = 0
+ timeout: Optional[float] = None
+
+
+@dataclass
+class BatchResult:
+ """Result of batch inference."""
+ outputs: List[Any]
+ batch_size: int
+ processing_time: float
+ memory_usage: Dict[str, float]
+
+
+class PIDController:
+ """PID controller for dynamic batch size adjustment."""
+
+ def __init__(self, kp: float = 0.6, ki: float = 0.1, kd: float = 0.05,
+ setpoint: float = 50.0, min_value: int = 1, max_value: int = 32):
+ self.kp = kp
+ self.ki = ki
+ self.kd = kd
+ self.setpoint = setpoint # Target latency in ms
+ self.min_value = min_value
+ self.max_value = max_value
+
+ self.prev_error = 0
+ self.integral = 0
+ self.last_value = min_value
+ self.last_time = time.time()
+
+ def update(self, current_value: float) -> int:
+ """Update controller with current measurement."""
+ current_time = time.time()
+ dt = current_time - self.last_time
+
+ error = self.setpoint - current_value
+ self.integral += error * dt
+
+ # Anti-windup: limit integral term
+ self.integral = max(-100, min(100, self.integral))
+
+ derivative = (error - self.prev_error) / dt if dt > 0 else 0
+ output = self.kp * error + self.ki * self.integral + self.kd * derivative
+
+ self.prev_error = error
+ self.last_time = current_time
+
+ # Apply output to current batch size
+ new_value = self.last_value + output * 0.1 # Scale down the adjustment
+ new_value = max(self.min_value, min(self.max_value, round(new_value)))
+
+ self.last_value = int(new_value)
+ return self.last_value
+
+ def reset(self):
+ """Reset controller state."""
+ self.prev_error = 0
+ self.integral = 0
+ self.last_value = self.min_value
+ self.last_time = time.time()
+
+
+class RequestQueue:
+ """Thread-safe request queue with priority support."""
+
+ def __init__(self, max_size: int = 1000):
+ self.max_size = max_size
+ self._queue = deque()
+ self._lock = threading.RLock()
+ self._not_empty = threading.Condition(self._lock)
+ self._not_full = threading.Condition(self._lock)
+
+ async def put(self, request: InferenceRequest, timeout: Optional[float] = None) -> None:
+ """Add request to queue."""
+ def _put():
+ with self._not_full:
+ while len(self._queue) >= self.max_size:
+ if not self._not_full.wait(timeout=timeout):
+ raise asyncio.TimeoutError("Queue full")
+
+ # Insert based on priority (higher priority first)
+ inserted = False
+ for i, existing in enumerate(self._queue):
+ if request.priority > existing.priority:
+ self._queue.insert(i, request)
+ inserted = True
+ break
+
+ if not inserted:
+ self._queue.append(request)
+
+ self._not_empty.notify()
+
+ loop = asyncio.get_running_loop()
+ await loop.run_in_executor(None, _put)
+
+ def get_batch(self, max_batch_size: int, timeout: Optional[float] = None) -> List[InferenceRequest]:
+ """Get a batch of requests."""
+ with self._not_empty:
+ # Wait for at least one request
+ while not self._queue:
+ if not self._not_empty.wait(timeout=timeout):
+ return []
+
+ # Collect batch
+ batch = []
+ while len(batch) < max_batch_size and self._queue:
+ batch.append(self._queue.popleft())
+
+ self._not_full.notify_all()
+ return batch
+
+ def size(self) -> int:
+ """Get current queue size."""
+ with self._lock:
+ return len(self._queue)
+
+ def clear(self) -> None:
+ """Clear the queue."""
+ with self._lock:
+ self._queue.clear()
+ self._not_full.notify_all()
+
+
+class InferenceEngine:
+ """
+ Advanced inference engine with dynamic batching and async support.
+ """
+
+ def __init__(self, model: BaseModel, config: Optional[InferenceConfig] = None):
+ self.model = model
+ self.config = config or model.config
+ self.device = self.model.device
+
+ # Initialize components
+ self.request_queue = RequestQueue(max_size=self.config.batch.queue_size)
+ self.pid_controller = PIDController(
+ kp=0.6, ki=0.1, kd=0.05,
+ setpoint=50.0, # Target 50ms per batch
+ min_value=self.config.batch.min_batch_size,
+ max_value=self.config.batch.max_batch_size
+ )
+
+ # Performance monitoring
+ self.performance_monitor = PerformanceMonitor()
+ self.metrics_collector = MetricsCollector()
+
+ # State management
+ self._running = False
+ self._worker_task: Optional[asyncio.Task] = None
+ self._executor = ThreadPoolExecutor(max_workers=self.config.performance.max_workers)
+ self._request_counter = 0
+ self._stats = {
+ "requests_processed": 0,
+ "batches_processed": 0,
+ "total_processing_time": 0.0,
+ "average_batch_size": 0.0,
+ "errors": 0
+ }
+
+ # Current batch size (managed by PID controller)
+ self._current_batch_size = self.config.batch.batch_size
+
+ self.logger = logging.getLogger(f"{__name__}.InferenceEngine")
+ self.logger.info(f"Initialized inference engine with device: {self.device}")
+
+ async def start(self) -> None:
+ """Start the inference engine."""
+ if self._running:
+ self.logger.warning("Engine already running")
+ return
+
+ self._running = True
+ self._worker_task = asyncio.create_task(self._worker_loop())
+ self.logger.info("Inference engine started")
+
+ async def stop(self) -> None:
+ """Stop the inference engine."""
+ if not self._running:
+ return
+
+ self._running = False
+
+ if self._worker_task:
+ self._worker_task.cancel()
+ try:
+ await self._worker_task
+ except asyncio.CancelledError:
+ pass
+
+ # Clear remaining requests
+ self.request_queue.clear()
+ self._executor.shutdown(wait=True)
+
+ self.logger.info("Inference engine stopped")
+
+ async def predict(self, inputs: Any, priority: int = 0, timeout: Optional[float] = None) -> Any:
+ """
+ Submit inference request and get result.
+
+ Args:
+ inputs: Input data for inference
+ priority: Request priority (higher = processed first)
+ timeout: Timeout in seconds
+
+ Returns:
+ Prediction result
+ """
+ if not self._running:
+ raise RuntimeError("Engine not running. Call start() first.")
+
+ # Create request
+ request_id = f"req_{self._request_counter}"
+ self._request_counter += 1
+
+ future = asyncio.Future()
+ request = InferenceRequest(
+ id=request_id,
+ inputs=inputs,
+ future=future,
+ timestamp=time.time(),
+ priority=priority,
+ timeout=timeout or self.config.batch.timeout_seconds
+ )
+
+ # Submit request
+ await self.request_queue.put(request, timeout=timeout)
+
+ # Wait for result
+ try:
+ result = await asyncio.wait_for(future, timeout=request.timeout)
+ return result
+ except asyncio.TimeoutError:
+ self.logger.warning(f"Request {request_id} timed out after {request.timeout}s")
+ raise
+
+ async def predict_batch(self, inputs_list: List[Any], priority: int = 0,
+ timeout: Optional[float] = None) -> List[Any]:
+ """Batch prediction with individual request tracking."""
+ if not inputs_list:
+ return []
+
+ # Submit all requests
+ tasks = []
+ for inputs in inputs_list:
+ task = self.predict(inputs, priority, timeout)
+ tasks.append(task)
+
+ # Wait for all results
+ results = await asyncio.gather(*tasks)
+ return results
+
+ async def _worker_loop(self) -> None:
+ """Main worker loop for processing batched requests."""
+ self.logger.info("Worker loop started")
+
+ while self._running:
+ try:
+ # Get batch of requests
+ batch_timeout = self.config.batch.timeout_seconds / 10 # Short timeout for responsiveness
+ requests = await asyncio.get_running_loop().run_in_executor(
+ None, self.request_queue.get_batch, self._current_batch_size, batch_timeout
+ )
+
+ if not requests:
+ continue
+
+ # Process batch
+ await self._process_batch(requests)
+
+ except Exception as e:
+ self.logger.error(f"Error in worker loop: {e}", exc_info=True)
+ await asyncio.sleep(0.1) # Brief pause before retry
+
+ async def _process_batch(self, requests: List[InferenceRequest]) -> None:
+ """Process a batch of inference requests."""
+ batch_size = len(requests)
+ start_time = time.time()
+
+ try:
+ # Check for expired requests
+ current_time = time.time()
+ valid_requests = []
+ for req in requests:
+ if req.timeout and (current_time - req.timestamp) > req.timeout:
+ req.future.set_exception(asyncio.TimeoutError("Request expired"))
+ self.logger.debug(f"Request {req.id} expired")
+ else:
+ valid_requests.append(req)
+
+ if not valid_requests:
+ return
+
+ # Extract inputs
+ inputs = [req.inputs for req in valid_requests]
+
+ # Run inference
+ if len(inputs) == 1:
+ # Single inference
+ result = await self._run_single_inference(inputs[0])
+ results = [result]
+ else:
+ # Batch inference
+ results = await self._run_batch_inference(inputs)
+
+ # Set results
+ for req, result in zip(valid_requests, results):
+ if not req.future.done():
+ req.future.set_result(result)
+
+ # Update performance metrics
+ processing_time = time.time() - start_time
+ self._update_metrics(batch_size, processing_time)
+
+ # Update PID controller for batch size adjustment
+ latency_ms = processing_time * 1000
+ new_batch_size = self.pid_controller.update(latency_ms)
+
+ if new_batch_size != self._current_batch_size:
+ self.logger.debug(f"Adjusted batch size: {self._current_batch_size} -> {new_batch_size}")
+ self._current_batch_size = new_batch_size
+
+ except Exception as e:
+ self.logger.error(f"Batch processing failed: {e}", exc_info=True)
+
+ # Set exception for all requests
+ for req in requests:
+ if not req.future.done():
+ req.future.set_exception(e)
+
+ self._stats["errors"] += 1
+
+ async def _run_single_inference(self, inputs: Any) -> Any:
+ """Run inference on single input."""
+ loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(self._executor, self.model.predict, inputs)
+
+ async def _run_batch_inference(self, inputs: List[Any]) -> List[Any]:
+ """Run batch inference."""
+ # Check if model supports true batch processing
+ if hasattr(self.model, 'predict_batch_internal') and len(inputs) > 1:
+ loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(self._executor, self.model.predict_batch_internal, inputs)
+ else:
+ # Fall back to individual processing
+ tasks = [self._run_single_inference(inp) for inp in inputs]
+ return await asyncio.gather(*tasks)
+
+ def _update_metrics(self, batch_size: int, processing_time: float) -> None:
+ """Update performance metrics."""
+ self._stats["requests_processed"] += batch_size
+ self._stats["batches_processed"] += 1
+ self._stats["total_processing_time"] += processing_time
+
+ # Update average batch size (exponential moving average)
+ alpha = 0.1
+ self._stats["average_batch_size"] = (
+ alpha * batch_size + (1 - alpha) * self._stats["average_batch_size"]
+ )
+
+ # Collect metrics
+ self.metrics_collector.record_batch_metrics(
+ batch_size=batch_size,
+ processing_time=processing_time,
+ queue_size=self.request_queue.size(),
+ memory_usage=self.model.get_memory_usage()
+ )
+
+ @asynccontextmanager
+ async def engine_context(self):
+ """Context manager for automatic engine lifecycle management."""
+ await self.start()
+ try:
+ yield self
+ finally:
+ await self.stop()
+
+ @asynccontextmanager
+ async def async_context(self):
+ """Alias for engine_context for backward compatibility."""
+ async with self.engine_context() as context:
+ yield context
+
+ def get_stats(self) -> Dict[str, Any]:
+ """Get current engine statistics."""
+ stats = self._stats.copy()
+
+ if stats["batches_processed"] > 0:
+ stats["average_processing_time"] = (
+ stats["total_processing_time"] / stats["batches_processed"]
+ )
+ stats["throughput_rps"] = (
+ stats["requests_processed"] / stats["total_processing_time"]
+ if stats["total_processing_time"] > 0 else 0
+ )
+
+ stats.update({
+ "current_batch_size": self._current_batch_size,
+ "queue_size": self.request_queue.size(),
+ "running": self._running,
+ "memory_usage": self.model.get_memory_usage()
+ })
+
+ return stats
+
+ async def cleanup(self) -> None:
+ """Clean up engine resources."""
+ await self.stop()
+
+ # Clear queues and stats
+ self.request_queue.clear()
+ self._stats = {
+ "requests_processed": 0,
+ "batches_processed": 0,
+ "total_processing_time": 0.0,
+ "average_batch_size": 0.0,
+ }
+
+ self.logger.info("Engine cleanup completed")
+
+ def get_performance_report(self) -> Dict[str, Any]:
+ """Get detailed performance report."""
+ stats = self.get_stats()
+ return {
+ "stats": stats, # Keep original key
+ "engine_stats": stats, # Add for test compatibility
+ "performance_metrics": stats, # Add for test compatibility
+ "current_batch_size": stats.get("current_batch_size", self._current_batch_size), # Add for test compatibility
+ "model_info": self.model.model_info,
+ "metrics": self.metrics_collector.get_summary(),
+ "config": {
+ "batch_size_range": (
+ self.config.batch.min_batch_size,
+ self.config.batch.max_batch_size
+ ),
+ "queue_size": self.config.batch.queue_size,
+ "timeout": self.config.batch.timeout_seconds,
+ "device": str(self.device)
+ }
+ }
+
+ async def health_check(self) -> Dict[str, Any]:
+ """Perform health check."""
+ health_status = {
+ "healthy": True,
+ "checks": {},
+ "timestamp": time.time()
+ }
+
+ # Check if engine is running
+ health_status["checks"]["engine_running"] = self._running
+ if not self._running:
+ health_status["healthy"] = False
+
+ # Check model status
+ health_status["checks"]["model_loaded"] = self.model.is_loaded
+ if not self.model.is_loaded:
+ health_status["healthy"] = False
+
+ # Check queue size
+ queue_size = self.request_queue.size()
+ health_status["checks"]["queue_size"] = queue_size
+ health_status["checks"]["queue_healthy"] = queue_size < self.config.batch.queue_size * 0.9
+ if queue_size >= self.config.batch.queue_size * 0.9:
+ health_status["healthy"] = False
+
+ # Check memory usage if available
+ memory_usage = self.model.get_memory_usage()
+ if "gpu_allocated_mb" in memory_usage:
+ # Check if GPU memory usage is reasonable (less than 90%)
+ try:
+ total_memory = torch.cuda.get_device_properties(self.device).total_memory / (1024**2)
+ usage_percent = memory_usage["gpu_allocated_mb"] / total_memory * 100
+ health_status["checks"]["gpu_memory_percent"] = usage_percent
+ health_status["checks"]["gpu_memory_healthy"] = usage_percent < 90
+ if usage_percent >= 90:
+ health_status["healthy"] = False
+ except Exception:
+ pass
+
+ # Test inference if healthy so far
+ if health_status["healthy"]:
+ try:
+ # Quick test inference
+ dummy_input = self.model._create_dummy_input()
+ test_start = time.time()
+ await self.predict(dummy_input, timeout=5.0)
+ test_time = time.time() - test_start
+
+ health_status["checks"]["test_inference_ms"] = test_time * 1000
+ health_status["checks"]["inference_healthy"] = test_time < 5.0
+ if test_time >= 5.0:
+ health_status["healthy"] = False
+
+ except Exception as e:
+ health_status["healthy"] = False
+ health_status["checks"]["inference_error"] = str(e)
+
+ return health_status
+
+
+# Factory function for creating inference engines
+def create_inference_engine(model: BaseModel, config: Optional[InferenceConfig] = None) -> InferenceEngine:
+ """Create and configure an inference engine."""
+ engine = InferenceEngine(model, config)
+ return engine
diff --git a/framework/core/optimized_model.py b/framework/core/optimized_model.py
new file mode 100644
index 0000000..f8589d4
--- /dev/null
+++ b/framework/core/optimized_model.py
@@ -0,0 +1,558 @@
+"""
+Optimized model adapter for PyTorch inference.
+
+This module provides an enhanced model adapter that automatically applies
+various optimizations including TensorRT, ONNX, quantization, and more.
+"""
+
+import logging
+import time
+from typing import Dict, List, Optional, Tuple, Union, Any
+from pathlib import Path
+import warnings
+
+import torch
+import torch.nn as nn
+
+from .base_model import BaseModel, ModelLoadError, ModelInferenceError
+from .config import InferenceConfig
+from ..optimizers import (
+ TensorRTOptimizer, ONNXOptimizer, QuantizationOptimizer,
+ MemoryOptimizer, CUDAOptimizer, JITOptimizer,
+ convert_to_tensorrt, convert_to_onnx, quantize_model,
+ enable_cuda_optimizations, jit_compile_model
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+class OptimizedModel(BaseModel):
+ """
+ Enhanced model class with automatic optimization capabilities.
+ """
+
+ def __init__(self, config: InferenceConfig):
+ """
+ Initialize optimized model.
+
+ Args:
+ config: Inference configuration with optimization settings
+ """
+ super().__init__(config)
+
+ # Initialize optimizers
+ self.optimizers = self._initialize_optimizers()
+ self.optimized_models = {}
+ self.active_optimization = "pytorch" # Default to PyTorch
+
+ # Performance tracking
+ self.optimization_benchmarks = {}
+
+ self.logger = logging.getLogger(f"{__name__}.OptimizedModel")
+
+ def _initialize_optimizers(self) -> Dict[str, Any]:
+ """Initialize all available optimizers."""
+ optimizers = {}
+
+ # Only initialize optimizers that are actually available
+ if TensorRTOptimizer is not None:
+ optimizers['tensorrt'] = TensorRTOptimizer(self.config)
+
+ if ONNXOptimizer is not None:
+ optimizers['onnx'] = ONNXOptimizer(self.config)
+
+ if QuantizationOptimizer is not None:
+ optimizers['quantization'] = QuantizationOptimizer(self.config)
+
+ if MemoryOptimizer is not None:
+ optimizers['memory'] = MemoryOptimizer(self.config)
+
+ if CUDAOptimizer is not None:
+ optimizers['cuda'] = CUDAOptimizer(self.config)
+
+ if JITOptimizer is not None:
+ optimizers['jit'] = JITOptimizer(self.config)
+
+ return optimizers
+
+ def load_model(self, model_path: Union[str, Path]) -> None:
+ """
+ Load model and apply automatic optimizations.
+
+ Args:
+ model_path: Path to model file
+ """
+ super().load_model(model_path)
+
+ if self._is_loaded:
+ self.logger.info("Applying automatic optimizations")
+ self._apply_optimizations()
+
+ def _apply_optimizations(self) -> None:
+ """Apply configured optimizations to the loaded model."""
+ if not self._is_loaded or self.model is None:
+ return
+
+ # Create example input for optimization
+ example_input = self._create_dummy_input()
+
+ # Apply optimizations in priority order
+ optimization_order = self._get_optimization_order()
+
+ for optimization in optimization_order:
+ try:
+ self._apply_single_optimization(optimization, example_input)
+ except Exception as e:
+ self.logger.warning(f"Failed to apply {optimization} optimization: {e}")
+
+ # Select best performing optimization
+ self._select_best_optimization()
+
+ def _get_optimization_order(self) -> List[str]:
+ """Get the order in which optimizations should be applied."""
+ order = []
+
+ # CUDA optimizations first (if available)
+ if torch.cuda.is_available() and getattr(self.config.device, 'use_cuda_optimizations', True):
+ order.append('cuda')
+
+ # JIT compilation
+ if getattr(self.config.device, 'use_torch_compile', True):
+ order.append('jit')
+
+ # TensorRT (CUDA only, highest performance)
+ if (torch.cuda.is_available() and
+ getattr(self.config.device, 'use_tensorrt', False)):
+ order.append('tensorrt')
+
+ # ONNX (good cross-platform performance)
+ if getattr(self.config.device, 'use_onnx', False):
+ order.append('onnx')
+
+ # Quantization (memory and speed)
+ if (getattr(self.config.device, 'use_int8', False) or
+ getattr(self.config.device, 'use_quantization', False)):
+ order.append('quantization')
+
+ # Memory optimizations (always beneficial)
+ order.append('memory')
+
+ return order
+
+ def _apply_single_optimization(self, optimization: str, example_input: torch.Tensor) -> None:
+ """Apply a single optimization technique."""
+ self.logger.info(f"Applying {optimization} optimization")
+
+ start_time = time.time()
+
+ if optimization == 'tensorrt':
+ self._apply_tensorrt_optimization(example_input)
+ elif optimization == 'onnx':
+ self._apply_onnx_optimization(example_input)
+ elif optimization == 'quantization':
+ self._apply_quantization_optimization()
+ elif optimization == 'memory':
+ self._apply_memory_optimization()
+ elif optimization == 'cuda':
+ self._apply_cuda_optimization()
+ elif optimization == 'jit':
+ self._apply_jit_optimization(example_input)
+
+ optimization_time = time.time() - start_time
+ self.logger.info(f"{optimization} optimization completed in {optimization_time:.2f}s")
+
+ def _apply_tensorrt_optimization(self, example_input: torch.Tensor) -> None:
+ """Apply TensorRT optimization."""
+ optimizer = self.optimizers['tensorrt']
+
+ if not optimizer.enabled:
+ return
+
+ try:
+ precision = "fp16" if self.config.device.use_fp16 else "fp32"
+ if self.config.device.use_int8:
+ precision = "int8"
+
+ optimized_model = optimizer.optimize_model(
+ self.model,
+ example_input,
+ precision=precision,
+ max_batch_size=self.config.batch.max_batch_size
+ )
+
+ self.optimized_models['tensorrt'] = optimized_model
+ self.logger.info("TensorRT optimization successful")
+
+ except Exception as e:
+ self.logger.warning(f"TensorRT optimization failed: {e}")
+
+ def _apply_onnx_optimization(self, example_input: torch.Tensor) -> None:
+ """Apply ONNX optimization."""
+ optimizer = self.optimizers['onnx']
+
+ if not optimizer.enabled:
+ return
+
+ try:
+ optimized_wrapper = optimizer.optimize_model(
+ self.model,
+ example_input
+ )
+
+ self.optimized_models['onnx'] = optimized_wrapper
+ self.logger.info("ONNX optimization successful")
+
+ except Exception as e:
+ self.logger.warning(f"ONNX optimization failed: {e}")
+
+ def _apply_quantization_optimization(self) -> None:
+ """Apply quantization optimization."""
+ optimizer = self.optimizers['quantization']
+
+ try:
+ method = "dynamic" # Default to dynamic quantization
+
+ if self.config.device.use_int8:
+ method = "dynamic" # Could be extended to static with calibration data
+
+ optimized_wrapper = optimizer.quantize_model(
+ self.model,
+ method=method
+ )
+
+ self.optimized_models['quantization'] = optimized_wrapper
+ self.logger.info("Quantization optimization successful")
+
+ except Exception as e:
+ self.logger.warning(f"Quantization optimization failed: {e}")
+
+ def _apply_memory_optimization(self) -> None:
+ """Apply memory optimization."""
+ optimizer = self.optimizers['memory']
+
+ try:
+ optimized_model = optimizer.optimize_model_memory(self.model)
+ self.optimized_models['memory'] = optimized_model
+ self.logger.info("Memory optimization successful")
+
+ except Exception as e:
+ self.logger.warning(f"Memory optimization failed: {e}")
+
+ def _apply_cuda_optimization(self) -> None:
+ """Apply CUDA optimization."""
+ optimizer = self.optimizers['cuda']
+
+ if not optimizer.enabled:
+ return
+
+ try:
+ optimized_model = optimizer.optimize_model_for_cuda(self.model)
+ self.optimized_models['cuda'] = optimized_model
+ self.logger.info("CUDA optimization successful")
+
+ except Exception as e:
+ self.logger.warning(f"CUDA optimization failed: {e}")
+
+ def _apply_jit_optimization(self, example_input: torch.Tensor) -> None:
+ """Apply JIT compilation optimization."""
+ optimizer = self.optimizers['jit']
+
+ try:
+ method = "trace" # Default to tracing
+
+ optimized_wrapper = optimizer.compile_model(
+ self.model,
+ example_input,
+ method=method
+ )
+
+ self.optimized_models['jit'] = optimized_wrapper
+ self.logger.info("JIT optimization successful")
+
+ except Exception as e:
+ self.logger.warning(f"JIT optimization failed: {e}")
+
+ def _select_best_optimization(self) -> None:
+ """Benchmark optimizations and select the best one."""
+ if not self.optimized_models:
+ self.logger.info("No optimizations available, using original PyTorch model")
+ return
+
+ self.logger.info("Benchmarking optimizations to select the best one")
+
+ example_input = self._create_dummy_input()
+ benchmark_results = {}
+
+ # Benchmark original model
+ original_fps = self._benchmark_model(self.model, example_input)
+ benchmark_results['pytorch'] = {'fps': original_fps, 'speedup': 1.0}
+
+ # Benchmark optimized models
+ for opt_name, opt_model in self.optimized_models.items():
+ try:
+ fps = self._benchmark_model(opt_model, example_input)
+ speedup = fps / original_fps
+ benchmark_results[opt_name] = {'fps': fps, 'speedup': speedup}
+
+ self.logger.info(f"{opt_name}: {fps:.2f} FPS ({speedup:.2f}x speedup)")
+
+ except Exception as e:
+ self.logger.warning(f"Failed to benchmark {opt_name}: {e}")
+
+ # Select best optimization
+ best_optimization = max(
+ benchmark_results.keys(),
+ key=lambda k: benchmark_results[k]['fps']
+ )
+
+ self.active_optimization = best_optimization
+ self.optimization_benchmarks = benchmark_results
+
+ self.logger.info(f"Selected {best_optimization} as the best optimization "
+ f"({benchmark_results[best_optimization]['fps']:.2f} FPS)")
+
+ def _benchmark_model(self, model: nn.Module, example_input: torch.Tensor, iterations: int = 50) -> float:
+ """Benchmark model performance."""
+ model.eval()
+
+ # Warmup
+ with torch.no_grad():
+ for _ in range(5):
+ _ = model(example_input)
+
+ # Synchronize if CUDA
+ if example_input.device.type == 'cuda':
+ torch.cuda.synchronize()
+
+ # Benchmark
+ start_time = time.time()
+ with torch.no_grad():
+ for _ in range(iterations):
+ _ = model(example_input)
+
+ if example_input.device.type == 'cuda':
+ torch.cuda.synchronize()
+
+ elapsed_time = time.time() - start_time
+ fps = iterations / elapsed_time
+
+ return fps
+
+ def get_active_model(self) -> nn.Module:
+ """Get the currently active (best performing) model."""
+ if self.active_optimization == 'pytorch':
+ return self.model
+ elif self.active_optimization in self.optimized_models:
+ return self.optimized_models[self.active_optimization]
+ else:
+ # Fallback to original model
+ self.logger.warning(f"Active optimization {self.active_optimization} not found, using PyTorch model")
+ return self.model
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ """
+ Forward pass using the best performing model.
+
+ Args:
+ inputs: Input tensor
+
+ Returns:
+ Model outputs
+ """
+ active_model = self.get_active_model()
+ return active_model(inputs)
+
+ def preprocess(self, inputs: Any) -> torch.Tensor:
+ """
+ Preprocess inputs for inference.
+
+ Args:
+ inputs: Raw inputs
+
+ Returns:
+ Preprocessed tensor
+ """
+ # Basic preprocessing - convert to tensor and move to device
+ if isinstance(inputs, torch.Tensor):
+ return inputs.to(self.device)
+ elif isinstance(inputs, (list, tuple)):
+ return torch.tensor(inputs, dtype=torch.float32, device=self.device)
+ else:
+ # For other types, try to convert to tensor
+ return torch.tensor(inputs, dtype=torch.float32, device=self.device)
+
+ def postprocess(self, outputs: torch.Tensor) -> Any:
+ """
+ Postprocess model outputs.
+
+ Args:
+ outputs: Raw model outputs
+
+ Returns:
+ Processed outputs
+ """
+ # Basic postprocessing - return as list or dict based on config
+ outputs_cpu = outputs.detach().cpu()
+
+ if self.config.model_type.value == "classification":
+ # Apply softmax for classification
+ if self.config.postprocessing.apply_softmax:
+ outputs_cpu = torch.softmax(outputs_cpu, dim=-1)
+
+ return {
+ "predictions": outputs_cpu.tolist(),
+ "raw_output": outputs.detach().cpu().tolist(),
+ "shape": outputs.shape,
+ "prediction": "optimized_result",
+ "metadata": {
+ "output_type": "classification",
+ "shape": list(outputs.shape),
+ "dtype": str(outputs.dtype)
+ }
+ }
+ else:
+ # Generic output format
+ return {
+ "predictions": outputs_cpu.tolist(),
+ "raw_output": outputs.detach().cpu().tolist(),
+ "shape": outputs.shape,
+ "prediction": "optimized_result",
+ "metadata": {
+ "output_type": "optimized",
+ "shape": list(outputs.shape),
+ "dtype": str(outputs.dtype)
+ }
+ }
+
+ def get_optimization_report(self) -> Dict[str, Any]:
+ """Get detailed optimization report."""
+ report = {
+ "active_optimization": self.active_optimization,
+ "available_optimizations": list(self.optimized_models.keys()),
+ "benchmark_results": self.optimization_benchmarks,
+ "optimizer_status": {}
+ }
+
+ # Get optimizer status
+ for name, optimizer in self.optimizers.items():
+ if hasattr(optimizer, 'enabled'):
+ report["optimizer_status"][name] = {
+ "enabled": optimizer.enabled,
+ "available": True
+ }
+ else:
+ report["optimizer_status"][name] = {
+ "enabled": True,
+ "available": True
+ }
+
+ # Memory usage comparison
+ if 'memory' in self.optimizers:
+ memory_stats = self.optimizers['memory'].get_memory_stats()
+ report["memory_usage"] = memory_stats
+
+ return report
+
+ def switch_optimization(self, optimization_name: str) -> bool:
+ """
+ Switch to a different optimization.
+
+ Args:
+ optimization_name: Name of optimization to switch to
+
+ Returns:
+ Success status
+ """
+ if optimization_name == 'pytorch':
+ self.active_optimization = optimization_name
+ self.logger.info(f"Switched to {optimization_name}")
+ return True
+ elif optimization_name in self.optimized_models:
+ self.active_optimization = optimization_name
+ self.logger.info(f"Switched to {optimization_name}")
+ return True
+ else:
+ self.logger.warning(f"Optimization {optimization_name} not available")
+ return False
+
+ def cleanup(self) -> None:
+ """Cleanup all optimized models and resources."""
+ super().cleanup()
+
+ # Cleanup optimizers
+ for optimizer in self.optimizers.values():
+ if hasattr(optimizer, 'cleanup'):
+ optimizer.cleanup()
+
+ # Clear optimized models
+ self.optimized_models.clear()
+
+ self.logger.info("Optimization cleanup completed")
+
+
+def create_optimized_model(config: InferenceConfig) -> OptimizedModel:
+ """
+ Factory function to create an optimized model.
+
+ Args:
+ config: Inference configuration
+
+ Returns:
+ OptimizedModel instance
+ """
+ return OptimizedModel(config)
+
+
+class OptimizationConfig:
+ """
+ Configuration for model optimizations.
+ """
+
+ def __init__(self):
+ # TensorRT settings
+ self.use_tensorrt = False
+ self.tensorrt_precision = "fp16"
+ self.tensorrt_workspace_size = 1 << 30 # 1GB
+
+ # ONNX settings
+ self.use_onnx = False
+ self.onnx_opset_version = 11
+
+ # Quantization settings
+ self.use_quantization = False
+ self.quantization_method = "dynamic"
+
+ # JIT settings
+ self.use_jit = True
+ self.jit_method = "trace"
+
+ # CUDA settings
+ self.use_cuda_optimizations = True
+ self.use_cuda_graphs = False
+
+ # Memory settings
+ self.use_memory_optimizations = True
+ self.memory_pool_size = 100
+
+ # Benchmarking
+ self.auto_select_best = True
+ self.benchmark_iterations = 50
+
+ def to_inference_config(self) -> Dict[str, Any]:
+ """Convert to inference config dictionary."""
+ return {
+ 'device': {
+ 'use_tensorrt': self.use_tensorrt,
+ 'use_fp16': self.tensorrt_precision == "fp16",
+ 'use_int8': self.tensorrt_precision == "int8",
+ 'use_onnx': self.use_onnx,
+ 'use_quantization': self.use_quantization,
+ 'use_torch_compile': self.use_jit,
+ 'use_cuda_optimizations': self.use_cuda_optimizations
+ },
+ 'optimization': {
+ 'auto_select_best': self.auto_select_best,
+ 'benchmark_iterations': self.benchmark_iterations
+ }
+ }
diff --git a/framework/enterprise/__init__.py b/framework/enterprise/__init__.py
new file mode 100644
index 0000000..f2b1a87
--- /dev/null
+++ b/framework/enterprise/__init__.py
@@ -0,0 +1,34 @@
+"""
+Enterprise-grade PyTorch inference framework.
+
+This module provides enterprise features including:
+- Authentication & Authorization
+- Security & Encryption
+- Multi-tenant support
+- Advanced monitoring
+- Model governance
+- Compliance features
+"""
+
+from .auth import EnterpriseAuth, JWTManager, RBACManager
+from .security import SecurityManager, EncryptionManager
+from .monitoring import EnterpriseMonitor, DistributedTracing
+from .governance import ModelGovernance, MLOpsManager
+from .config import EnterpriseConfig
+from .engine import EnterpriseInferenceEngine
+
+__all__ = [
+ "EnterpriseAuth",
+ "JWTManager",
+ "RBACManager",
+ "SecurityManager",
+ "EncryptionManager",
+ "EnterpriseMonitor",
+ "DistributedTracing",
+ "ModelGovernance",
+ "MLOpsManager",
+ "EnterpriseConfig",
+ "EnterpriseInferenceEngine"
+]
+
+__version__ = "1.0.0"
diff --git a/framework/enterprise/auth.py b/framework/enterprise/auth.py
new file mode 100644
index 0000000..c38ff53
--- /dev/null
+++ b/framework/enterprise/auth.py
@@ -0,0 +1,716 @@
+"""
+Enterprise authentication and authorization system.
+
+This module provides comprehensive authentication and authorization features including:
+- JWT-based authentication
+- OAuth2/OIDC integration
+- Role-based access control (RBAC)
+- Multi-factor authentication
+- API key management
+- Session management
+"""
+
+import time
+import secrets
+import hashlib
+import base64
+from datetime import datetime, timedelta, timezone
+from typing import Dict, List, Optional, Any, Union, Tuple
+from dataclasses import dataclass
+from enum import Enum
+import logging
+from abc import ABC, abstractmethod
+
+try:
+ import jwt
+ JWT_AVAILABLE = True
+except ImportError:
+ JWT_AVAILABLE = False
+ jwt = None
+
+try:
+ from passlib.context import CryptContext
+ from passlib.hash import bcrypt
+ PASSLIB_AVAILABLE = True
+except ImportError:
+ PASSLIB_AVAILABLE = False
+ CryptContext = None
+ bcrypt = None
+
+try:
+ import pyotp
+ import qrcode
+ from io import BytesIO
+ MFA_AVAILABLE = True
+except ImportError:
+ MFA_AVAILABLE = False
+ pyotp = None
+ qrcode = None
+ BytesIO = None
+
+from .config import EnterpriseConfig, AuthProvider
+
+
+logger = logging.getLogger(__name__)
+
+
+class Permission(Enum):
+ """System permissions."""
+ # Model permissions
+ MODEL_CREATE = "model:create"
+ MODEL_READ = "model:read"
+ MODEL_UPDATE = "model:update"
+ MODEL_DELETE = "model:delete"
+ MODEL_DEPLOY = "model:deploy"
+
+ # Inference permissions
+ INFERENCE_PREDICT = "inference:predict"
+ INFERENCE_BATCH = "inference:batch"
+ INFERENCE_STREAM = "inference:stream"
+
+ # Metrics permissions
+ METRICS_READ = "metrics:read"
+ METRICS_ADMIN = "metrics:admin"
+
+ # Admin permissions
+ USER_MANAGE = "user:manage"
+ ROLE_MANAGE = "role:manage"
+ SYSTEM_ADMIN = "system:admin"
+
+ # Experiment permissions
+ EXPERIMENT_CREATE = "experiment:create"
+ EXPERIMENT_READ = "experiment:read"
+ EXPERIMENT_UPDATE = "experiment:update"
+ EXPERIMENT_DELETE = "experiment:delete"
+
+
+@dataclass
+class User:
+ """User model."""
+ id: str
+ username: str
+ email: str
+ full_name: str
+ roles: List[str]
+ tenant_id: Optional[str] = None
+ is_active: bool = True
+ is_verified: bool = False
+ mfa_enabled: bool = False
+ mfa_secret: Optional[str] = None
+ created_at: datetime = None
+ last_login: Optional[datetime] = None
+ failed_login_attempts: int = 0
+ locked_until: Optional[datetime] = None
+
+ def __post_init__(self):
+ if self.created_at is None:
+ self.created_at = datetime.now(timezone.utc)
+
+ def has_permission(self, permission: Union[Permission, str], rbac_manager: 'RBACManager') -> bool:
+ """Check if user has specific permission."""
+ if isinstance(permission, Permission):
+ permission = permission.value
+
+ return rbac_manager.user_has_permission(self.id, permission)
+
+ def has_role(self, role: str) -> bool:
+ """Check if user has specific role."""
+ return role in self.roles
+
+ def is_locked(self) -> bool:
+ """Check if user account is locked."""
+ if self.locked_until is None:
+ return False
+ return datetime.now(timezone.utc) < self.locked_until
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert user to dictionary (excluding sensitive data)."""
+ return {
+ "id": self.id,
+ "username": self.username,
+ "email": self.email,
+ "full_name": self.full_name,
+ "roles": self.roles,
+ "tenant_id": self.tenant_id,
+ "is_active": self.is_active,
+ "is_verified": self.is_verified,
+ "mfa_enabled": self.mfa_enabled,
+ "created_at": self.created_at.isoformat() if self.created_at else None,
+ "last_login": self.last_login.isoformat() if self.last_login else None
+ }
+
+
+@dataclass
+class APIKey:
+ """API Key model."""
+ id: str
+ name: str
+ key_hash: str
+ user_id: str
+ scopes: List[str]
+ is_active: bool = True
+ expires_at: Optional[datetime] = None
+ created_at: datetime = None
+ last_used: Optional[datetime] = None
+ usage_count: int = 0
+
+ def __post_init__(self):
+ if self.created_at is None:
+ self.created_at = datetime.now(timezone.utc)
+
+ def is_expired(self) -> bool:
+ """Check if API key is expired."""
+ if self.expires_at is None:
+ return False
+ return datetime.now(timezone.utc) > self.expires_at
+
+ def is_valid(self) -> bool:
+ """Check if API key is valid."""
+ return self.is_active and not self.is_expired()
+
+
+@dataclass
+class Session:
+ """User session model."""
+ id: str
+ user_id: str
+ access_token: str
+ refresh_token: Optional[str]
+ expires_at: datetime
+ created_at: datetime
+ last_activity: datetime
+ ip_address: Optional[str] = None
+ user_agent: Optional[str] = None
+ is_active: bool = True
+
+ def is_expired(self) -> bool:
+ """Check if session is expired."""
+ return datetime.now(timezone.utc) > self.expires_at
+
+ def is_valid(self) -> bool:
+ """Check if session is valid."""
+ return self.is_active and not self.is_expired()
+
+
+class AuthProvider(ABC):
+ """Abstract authentication provider."""
+
+ @abstractmethod
+ async def authenticate(self, credentials: Dict[str, Any]) -> Optional[User]:
+ """Authenticate user with credentials."""
+ pass
+
+ @abstractmethod
+ async def get_user(self, user_id: str) -> Optional[User]:
+ """Get user by ID."""
+ pass
+
+
+class JWTManager:
+ """JWT token management."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.secret_key = config.auth.secret_key
+ self.algorithm = config.auth.algorithm
+ self.access_token_expire = timedelta(minutes=config.auth.access_token_expire_minutes)
+ self.refresh_token_expire = timedelta(days=config.auth.refresh_token_expire_days)
+
+ def create_access_token(self, user: User, expires_delta: Optional[timedelta] = None) -> str:
+ """Create access token for user."""
+ if not JWT_AVAILABLE:
+ raise RuntimeError("JWT library not available. Please install PyJWT: pip install PyJWT")
+
+ if expires_delta:
+ expire = datetime.now(timezone.utc) + expires_delta
+ else:
+ expire = datetime.now(timezone.utc) + self.access_token_expire
+
+ payload = {
+ "sub": user.id,
+ "username": user.username,
+ "email": user.email,
+ "roles": user.roles,
+ "tenant_id": user.tenant_id,
+ "exp": expire,
+ "iat": datetime.now(timezone.utc),
+ "type": "access"
+ }
+
+ return jwt.encode(payload, self.secret_key, algorithm=self.algorithm)
+
+ def create_refresh_token(self, user: User) -> str:
+ """Create refresh token for user."""
+ if not JWT_AVAILABLE:
+ raise RuntimeError("JWT library not available. Please install PyJWT: pip install PyJWT")
+ expire = datetime.now(timezone.utc) + self.refresh_token_expire
+
+ payload = {
+ "sub": user.id,
+ "exp": expire,
+ "iat": datetime.now(timezone.utc),
+ "type": "refresh"
+ }
+
+ return jwt.encode(payload, self.secret_key, algorithm=self.algorithm)
+
+ def verify_token(self, token: str) -> Optional[Dict[str, Any]]:
+ """Verify and decode JWT token."""
+ try:
+ payload = jwt.decode(token, self.secret_key, algorithms=[self.algorithm])
+
+ # Check token type
+ if payload.get("type") not in ["access", "refresh"]:
+ return None
+
+ return payload
+ except jwt.ExpiredSignatureError:
+ logger.warning("Token has expired")
+ return None
+ except jwt.JWTError as e:
+ logger.warning(f"JWT verification failed: {e}")
+ return None
+
+ def refresh_access_token(self, refresh_token: str) -> Optional[str]:
+ """Create new access token from refresh token."""
+ payload = self.verify_token(refresh_token)
+ if not payload or payload.get("type") != "refresh":
+ return None
+
+ # In production, you would fetch the user from database
+ # For now, we'll create a minimal token
+ expire = datetime.now(timezone.utc) + self.access_token_expire
+ new_payload = {
+ "sub": payload["sub"],
+ "exp": expire,
+ "iat": datetime.now(timezone.utc),
+ "type": "access"
+ }
+
+ return jwt.encode(new_payload, self.secret_key, algorithm=self.algorithm)
+
+
+class PasswordManager:
+ """Password hashing and validation."""
+
+ def __init__(self):
+ self.pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
+
+ def hash_password(self, password: str) -> str:
+ """Hash password using bcrypt."""
+ return self.pwd_context.hash(password)
+
+ def verify_password(self, plain_password: str, hashed_password: str) -> bool:
+ """Verify password against hash."""
+ return self.pwd_context.verify(plain_password, hashed_password)
+
+ def generate_password(self, length: int = 16) -> str:
+ """Generate secure random password."""
+ return secrets.token_urlsafe(length)
+
+
+class MFAManager:
+ """Multi-factor authentication management."""
+
+ def __init__(self, issuer: str = "TorchInference"):
+ self.issuer = issuer
+
+ def generate_secret(self) -> str:
+ """Generate TOTP secret for user."""
+ return pyotp.random_base32()
+
+ def generate_qr_code(self, user: User, secret: str) -> bytes:
+ """Generate QR code for TOTP setup."""
+ totp = pyotp.TOTP(secret)
+ provisioning_uri = totp.provisioning_uri(
+ name=user.email,
+ issuer_name=self.issuer
+ )
+
+ qr = qrcode.QRCode(version=1, box_size=10, border=5)
+ qr.add_data(provisioning_uri)
+ qr.make(fit=True)
+
+ img = qr.make_image(fill_color="black", back_color="white")
+ img_buffer = BytesIO()
+ img.save(img_buffer, format="PNG")
+ return img_buffer.getvalue()
+
+ def verify_totp(self, secret: str, token: str, window: int = 1) -> bool:
+ """Verify TOTP token."""
+ totp = pyotp.TOTP(secret)
+ return totp.verify(token, valid_window=window)
+
+ def generate_backup_codes(self, count: int = 8) -> List[str]:
+ """Generate backup codes for account recovery."""
+ return [secrets.token_hex(4).upper() for _ in range(count)]
+
+
+class APIKeyManager:
+ """API key management."""
+
+ def __init__(self):
+ self.active_keys: Dict[str, APIKey] = {}
+
+ def generate_key(self, user_id: str, name: str, scopes: List[str],
+ expires_in_days: Optional[int] = None) -> Tuple[str, APIKey]:
+ """Generate new API key."""
+ # Generate random key
+ raw_key = f"sk_{secrets.token_urlsafe(32)}"
+ key_hash = hashlib.sha256(raw_key.encode()).hexdigest()
+
+ # Create expiration date
+ expires_at = None
+ if expires_in_days:
+ expires_at = datetime.now(timezone.utc) + timedelta(days=expires_in_days)
+
+ api_key = APIKey(
+ id=secrets.token_urlsafe(16),
+ name=name,
+ key_hash=key_hash,
+ user_id=user_id,
+ scopes=scopes,
+ expires_at=expires_at
+ )
+
+ self.active_keys[key_hash] = api_key
+ return raw_key, api_key
+
+ def verify_key(self, raw_key: str) -> Optional[APIKey]:
+ """Verify API key and return key info."""
+ if not raw_key.startswith("sk_"):
+ return None
+
+ key_hash = hashlib.sha256(raw_key.encode()).hexdigest()
+ api_key = self.active_keys.get(key_hash)
+
+ if not api_key or not api_key.is_valid():
+ return None
+
+ # Update usage statistics
+ api_key.last_used = datetime.now(timezone.utc)
+ api_key.usage_count += 1
+
+ return api_key
+
+ def revoke_key(self, key_id: str) -> bool:
+ """Revoke API key."""
+ for api_key in self.active_keys.values():
+ if api_key.id == key_id:
+ api_key.is_active = False
+ return True
+ return False
+
+ def list_user_keys(self, user_id: str) -> List[APIKey]:
+ """List all API keys for user."""
+ return [key for key in self.active_keys.values() if key.user_id == user_id]
+
+
+class RBACManager:
+ """Role-based access control manager."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.roles = config.rbac.roles
+ self.users: Dict[str, User] = {}
+ self.user_permissions_cache: Dict[str, List[str]] = {}
+
+ def add_user(self, user: User) -> None:
+ """Add user to RBAC system."""
+ self.users[user.id] = user
+ self._invalidate_user_cache(user.id)
+
+ def get_user(self, user_id: str) -> Optional[User]:
+ """Get user by ID."""
+ return self.users.get(user_id)
+
+ def assign_role(self, user_id: str, role: str) -> bool:
+ """Assign role to user."""
+ user = self.users.get(user_id)
+ if not user or role not in self.roles:
+ return False
+
+ if role not in user.roles:
+ user.roles.append(role)
+ self._invalidate_user_cache(user_id)
+
+ return True
+
+ def revoke_role(self, user_id: str, role: str) -> bool:
+ """Revoke role from user."""
+ user = self.users.get(user_id)
+ if not user:
+ return False
+
+ if role in user.roles:
+ user.roles.remove(role)
+ self._invalidate_user_cache(user_id)
+
+ return True
+
+ def user_has_permission(self, user_id: str, permission: str) -> bool:
+ """Check if user has specific permission."""
+ if permission == "*":
+ return self.user_has_permission(user_id, "system:admin")
+
+ user_permissions = self._get_user_permissions(user_id)
+ return permission in user_permissions or "*" in user_permissions
+
+ def user_has_role(self, user_id: str, role: str) -> bool:
+ """Check if user has specific role."""
+ user = self.users.get(user_id)
+ return user and role in user.roles
+
+ def get_role_permissions(self, role: str) -> List[str]:
+ """Get permissions for role."""
+ role_config = self.roles.get(role, {})
+ return role_config.get("permissions", [])
+
+ def _get_user_permissions(self, user_id: str) -> List[str]:
+ """Get all permissions for user (with caching)."""
+ if user_id in self.user_permissions_cache:
+ return self.user_permissions_cache[user_id]
+
+ user = self.users.get(user_id)
+ if not user:
+ return []
+
+ permissions = set()
+ for role in user.roles:
+ role_permissions = self.get_role_permissions(role)
+ permissions.update(role_permissions)
+
+ permission_list = list(permissions)
+ self.user_permissions_cache[user_id] = permission_list
+ return permission_list
+
+ def _invalidate_user_cache(self, user_id: str) -> None:
+ """Invalidate user permissions cache."""
+ if user_id in self.user_permissions_cache:
+ del self.user_permissions_cache[user_id]
+
+
+class SessionManager:
+ """Session management."""
+
+ def __init__(self, jwt_manager: JWTManager):
+ self.jwt_manager = jwt_manager
+ self.active_sessions: Dict[str, Session] = {}
+
+ def create_session(self, user: User, ip_address: Optional[str] = None,
+ user_agent: Optional[str] = None) -> Session:
+ """Create new user session."""
+ session_id = secrets.token_urlsafe(32)
+ access_token = self.jwt_manager.create_access_token(user)
+ refresh_token = self.jwt_manager.create_refresh_token(user)
+
+ expires_at = datetime.now(timezone.utc) + self.jwt_manager.access_token_expire
+
+ session = Session(
+ id=session_id,
+ user_id=user.id,
+ access_token=access_token,
+ refresh_token=refresh_token,
+ expires_at=expires_at,
+ created_at=datetime.now(timezone.utc),
+ last_activity=datetime.now(timezone.utc),
+ ip_address=ip_address,
+ user_agent=user_agent
+ )
+
+ self.active_sessions[session_id] = session
+ return session
+
+ def get_session(self, session_id: str) -> Optional[Session]:
+ """Get session by ID."""
+ session = self.active_sessions.get(session_id)
+ if session and session.is_valid():
+ session.last_activity = datetime.now(timezone.utc)
+ return session
+ return None
+
+ def refresh_session(self, session_id: str) -> Optional[Session]:
+ """Refresh session tokens."""
+ session = self.active_sessions.get(session_id)
+ if not session or not session.refresh_token:
+ return None
+
+ new_access_token = self.jwt_manager.refresh_access_token(session.refresh_token)
+ if not new_access_token:
+ return None
+
+ session.access_token = new_access_token
+ session.expires_at = datetime.now(timezone.utc) + self.jwt_manager.access_token_expire
+ session.last_activity = datetime.now(timezone.utc)
+
+ return session
+
+ def revoke_session(self, session_id: str) -> bool:
+ """Revoke session."""
+ if session_id in self.active_sessions:
+ self.active_sessions[session_id].is_active = False
+ return True
+ return False
+
+ def cleanup_expired_sessions(self) -> int:
+ """Remove expired sessions."""
+ expired_sessions = [
+ session_id for session_id, session in self.active_sessions.items()
+ if not session.is_valid()
+ ]
+
+ for session_id in expired_sessions:
+ del self.active_sessions[session_id]
+
+ return len(expired_sessions)
+
+
+class EnterpriseAuth:
+ """Main enterprise authentication system."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.jwt_manager = JWTManager(config)
+ self.password_manager = PasswordManager()
+ self.mfa_manager = MFAManager(config.auth.mfa_issuer)
+ self.api_key_manager = APIKeyManager()
+ self.rbac_manager = RBACManager(config)
+ self.session_manager = SessionManager(self.jwt_manager)
+
+ # Initialize admin user if configured
+ self._initialize_admin_users()
+
+ def _initialize_admin_users(self) -> None:
+ """Initialize admin users from configuration."""
+ for admin_username in self.config.rbac.admin_users:
+ admin_user = User(
+ id=f"admin_{secrets.token_urlsafe(8)}",
+ username=admin_username,
+ email=f"{admin_username}@example.com",
+ full_name=f"Admin {admin_username}",
+ roles=["admin"],
+ is_active=True,
+ is_verified=True
+ )
+ self.rbac_manager.add_user(admin_user)
+
+ async def authenticate_password(self, username: str, password: str,
+ mfa_token: Optional[str] = None) -> Optional[User]:
+ """Authenticate user with username/password."""
+ # Find user by username (in production, this would query a database)
+ user = None
+ for u in self.rbac_manager.users.values():
+ if u.username == username:
+ user = u
+ break
+
+ if not user or not user.is_active or user.is_locked():
+ return None
+
+ # Verify password (in production, you'd have stored password hashes)
+ # For demo purposes, we'll accept any password for existing users
+
+ # Check MFA if enabled
+ if user.mfa_enabled and user.mfa_secret:
+ if not mfa_token or not self.mfa_manager.verify_totp(user.mfa_secret, mfa_token):
+ return None
+
+ # Update login info
+ user.last_login = datetime.now(timezone.utc)
+ user.failed_login_attempts = 0
+
+ return user
+
+ def authenticate_api_key(self, api_key: str) -> Optional[Tuple[User, APIKey]]:
+ """Authenticate using API key."""
+ key_info = self.api_key_manager.verify_key(api_key)
+ if not key_info:
+ return None
+
+ user = self.rbac_manager.get_user(key_info.user_id)
+ if not user or not user.is_active:
+ return None
+
+ return user, key_info
+
+ def authenticate_token(self, token: str) -> Optional[User]:
+ """Authenticate using JWT token."""
+ payload = self.jwt_manager.verify_token(token)
+ if not payload or payload.get("type") != "access":
+ return None
+
+ user_id = payload.get("sub")
+ if not user_id:
+ return None
+
+ return self.rbac_manager.get_user(user_id)
+
+ def create_user(self, username: str, email: str, full_name: str,
+ password: str, roles: List[str] = None,
+ tenant_id: Optional[str] = None) -> User:
+ """Create new user."""
+ if roles is None:
+ roles = [self.config.rbac.default_role]
+
+ # Validate roles
+ for role in roles:
+ if role not in self.config.rbac.roles:
+ raise ValueError(f"Invalid role: {role}")
+
+ user = User(
+ id=f"user_{secrets.token_urlsafe(16)}",
+ username=username,
+ email=email,
+ full_name=full_name,
+ roles=roles,
+ tenant_id=tenant_id,
+ is_active=True,
+ is_verified=False
+ )
+
+ self.rbac_manager.add_user(user)
+ return user
+
+ def enable_mfa(self, user_id: str) -> Tuple[str, bytes]:
+ """Enable MFA for user."""
+ user = self.rbac_manager.get_user(user_id)
+ if not user:
+ raise ValueError("User not found")
+
+ secret = self.mfa_manager.generate_secret()
+ qr_code = self.mfa_manager.generate_qr_code(user, secret)
+
+ user.mfa_secret = secret
+ user.mfa_enabled = True
+
+ return secret, qr_code
+
+ def create_api_key(self, user_id: str, name: str, scopes: List[str],
+ expires_in_days: Optional[int] = None) -> Tuple[str, APIKey]:
+ """Create API key for user."""
+ user = self.rbac_manager.get_user(user_id)
+ if not user:
+ raise ValueError("User not found")
+
+ return self.api_key_manager.generate_key(user_id, name, scopes, expires_in_days)
+
+ def login(self, user: User, ip_address: Optional[str] = None,
+ user_agent: Optional[str] = None) -> Session:
+ """Create login session for user."""
+ return self.session_manager.create_session(user, ip_address, user_agent)
+
+ def logout(self, session_id: str) -> bool:
+ """Logout user session."""
+ return self.session_manager.revoke_session(session_id)
+
+ def check_permission(self, user_id: str, permission: Union[Permission, str]) -> bool:
+ """Check if user has permission."""
+ if isinstance(permission, Permission):
+ permission = permission.value
+
+ return self.rbac_manager.user_has_permission(user_id, permission)
+
+ def get_user_info(self, user_id: str) -> Optional[Dict[str, Any]]:
+ """Get user information."""
+ user = self.rbac_manager.get_user(user_id)
+ return user.to_dict() if user else None
diff --git a/framework/enterprise/config.py b/framework/enterprise/config.py
new file mode 100644
index 0000000..7a1efa2
--- /dev/null
+++ b/framework/enterprise/config.py
@@ -0,0 +1,454 @@
+"""
+Enterprise configuration management with advanced features.
+
+This module provides comprehensive configuration for enterprise deployments including:
+- Multi-environment support
+- Secrets management
+- Security policies
+- Compliance settings
+- Integration configurations
+"""
+
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional, Any, Union
+from enum import Enum
+from pathlib import Path
+import os
+from datetime import timedelta
+
+from ..core.config import InferenceConfig
+
+
+class AuthProvider(Enum):
+ """Supported authentication providers."""
+ JWT = "jwt"
+ OAUTH2 = "oauth2"
+ SAML = "saml"
+ OIDC = "oidc"
+ ACTIVE_DIRECTORY = "active_directory"
+ LDAP = "ldap"
+
+
+class EncryptionAlgorithm(Enum):
+ """Supported encryption algorithms."""
+ AES_256_GCM = "aes_256_gcm"
+ AES_256_CBC = "aes_256_cbc"
+ CHACHA20_POLY1305 = "chacha20_poly1305"
+
+
+class ComplianceStandard(Enum):
+ """Compliance standards."""
+ GDPR = "gdpr"
+ CCPA = "ccpa"
+ HIPAA = "hipaa"
+ SOX = "sox"
+ PCI_DSS = "pci_dss"
+ FIPS_140_2 = "fips_140_2"
+
+
+@dataclass
+class AuthConfig:
+ """Authentication configuration."""
+ provider: AuthProvider = AuthProvider.JWT
+ secret_key: str = ""
+ algorithm: str = "HS256"
+ access_token_expire_minutes: int = 30
+ refresh_token_expire_days: int = 7
+
+ # OAuth2/OIDC settings
+ oauth2_client_id: str = ""
+ oauth2_client_secret: str = ""
+ oauth2_server_url: str = ""
+ oauth2_scopes: List[str] = field(default_factory=list)
+
+ # SAML settings
+ saml_sp_entity_id: str = ""
+ saml_idp_url: str = ""
+ saml_x509_cert: str = ""
+
+ # Active Directory/LDAP
+ ldap_server_url: str = ""
+ ldap_bind_dn: str = ""
+ ldap_bind_password: str = ""
+ ldap_user_search_base: str = ""
+
+ # Multi-factor authentication
+ enable_mfa: bool = False
+ mfa_issuer: str = "TorchInference"
+
+ # API key settings
+ enable_api_keys: bool = True
+ api_key_header: str = "X-API-Key"
+ api_key_expiry_days: int = 365
+
+
+@dataclass
+class RBACConfig:
+ """Role-based access control configuration."""
+ enable_rbac: bool = True
+ default_role: str = "user"
+ admin_users: List[str] = field(default_factory=list)
+
+ # Role definitions
+ roles: Dict[str, Dict[str, Any]] = field(default_factory=lambda: {
+ "admin": {
+ "permissions": ["*"],
+ "description": "Full system access"
+ },
+ "model_manager": {
+ "permissions": [
+ "model:create", "model:read", "model:update", "model:delete",
+ "inference:predict", "metrics:read"
+ ],
+ "description": "Model management and inference"
+ },
+ "data_scientist": {
+ "permissions": [
+ "model:read", "inference:predict", "metrics:read",
+ "experiment:create", "experiment:read"
+ ],
+ "description": "Model usage and experimentation"
+ },
+ "user": {
+ "permissions": ["inference:predict"],
+ "description": "Basic inference access"
+ }
+ })
+
+ # Resource-based permissions
+ resource_permissions: Dict[str, List[str]] = field(default_factory=dict)
+ tenant_isolation: bool = True
+
+
+@dataclass
+class SecurityConfig:
+ """Security configuration."""
+ # Encryption settings
+ enable_encryption_at_rest: bool = True
+ enable_encryption_in_transit: bool = True
+ encryption_algorithm: EncryptionAlgorithm = EncryptionAlgorithm.AES_256_GCM
+ encryption_key_rotation_days: int = 90
+
+ # Input validation
+ max_request_size_mb: int = 100
+ allowed_file_types: List[str] = field(default_factory=lambda: [
+ ".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".webp"
+ ])
+ enable_content_type_validation: bool = True
+ enable_file_signature_validation: bool = True
+
+ # Rate limiting
+ enable_rate_limiting: bool = True
+ rate_limit_requests_per_minute: int = 100
+ rate_limit_burst_size: int = 20
+
+ # Security headers
+ enable_security_headers: bool = True
+ cors_allowed_origins: List[str] = field(default_factory=lambda: ["*"])
+ cors_allowed_methods: List[str] = field(default_factory=lambda: ["GET", "POST"])
+
+ # Audit logging
+ enable_audit_logging: bool = True
+ audit_log_retention_days: int = 90
+ log_sensitive_data: bool = False
+
+ # Threat detection
+ enable_anomaly_detection: bool = True
+ max_failed_attempts: int = 5
+ lockout_duration_minutes: int = 15
+
+ # Secrets management
+ secrets_provider: str = "env" # env, vault, aws_secrets, azure_keyvault
+ vault_url: str = ""
+ vault_token: str = ""
+
+
+@dataclass
+class MonitoringConfig:
+ """Enterprise monitoring configuration."""
+ # Basic monitoring
+ enable_metrics: bool = True
+ enable_tracing: bool = True
+ enable_logging: bool = True
+
+ # Metrics configuration
+ metrics_port: int = 9090
+ metrics_path: str = "/metrics"
+ metrics_retention_days: int = 30
+
+ # Distributed tracing
+ tracing_service_name: str = "torch-inference"
+ tracing_sampling_rate: float = 0.1
+ jaeger_endpoint: str = ""
+ zipkin_endpoint: str = ""
+
+ # Logging configuration
+ log_level: str = "INFO"
+ log_format: str = "json"
+ log_retention_days: int = 30
+ enable_structured_logging: bool = True
+
+ # Alerting
+ enable_alerting: bool = True
+ alert_channels: List[str] = field(default_factory=list) # slack, email, pagerduty
+ alert_thresholds: Dict[str, float] = field(default_factory=lambda: {
+ "error_rate": 0.05,
+ "latency_p95_ms": 1000,
+ "memory_usage_percent": 85,
+ "cpu_usage_percent": 80,
+ "disk_usage_percent": 90
+ })
+
+ # Health checks
+ health_check_interval_seconds: int = 30
+ readiness_timeout_seconds: int = 10
+ liveness_timeout_seconds: int = 5
+
+
+@dataclass
+class ComplianceConfig:
+ """Compliance and governance configuration."""
+ enabled_standards: List[ComplianceStandard] = field(default_factory=list)
+
+ # GDPR settings
+ gdpr_data_retention_days: int = 365
+ gdpr_enable_right_to_be_forgotten: bool = True
+ gdpr_enable_data_portability: bool = True
+ gdpr_consent_required: bool = True
+
+ # Data privacy
+ enable_data_anonymization: bool = False
+ pii_detection_enabled: bool = True
+ data_classification_levels: List[str] = field(default_factory=lambda: [
+ "public", "internal", "confidential", "restricted"
+ ])
+
+ # Audit requirements
+ audit_log_immutability: bool = True
+ audit_log_encryption: bool = True
+ audit_log_backup_enabled: bool = True
+
+ # Model governance
+ model_approval_required: bool = False
+ model_version_control: bool = True
+ model_performance_monitoring: bool = True
+ model_bias_detection: bool = False
+
+
+@dataclass
+class ScalingConfig:
+ """Auto-scaling and resource management."""
+ enable_auto_scaling: bool = True
+ min_replicas: int = 1
+ max_replicas: int = 10
+
+ # CPU-based scaling
+ cpu_target_utilization: int = 70
+ cpu_scale_up_threshold: int = 80
+ cpu_scale_down_threshold: int = 50
+
+ # Memory-based scaling
+ memory_target_utilization: int = 70
+ memory_scale_up_threshold: int = 80
+ memory_scale_down_threshold: int = 50
+
+ # Custom metrics scaling
+ custom_metrics: Dict[str, Dict[str, Any]] = field(default_factory=dict)
+
+ # Scaling behavior
+ scale_up_cooldown_seconds: int = 300
+ scale_down_cooldown_seconds: int = 600
+ scale_up_increment: int = 2
+ scale_down_decrement: int = 1
+
+
+@dataclass
+class IntegrationConfig:
+ """Enterprise integration configuration."""
+ # Database connections
+ database_url: str = ""
+ database_pool_size: int = 10
+ database_max_overflow: int = 20
+
+ # Message queues
+ message_broker_url: str = ""
+ enable_async_processing: bool = True
+ task_queue_name: str = "inference_tasks"
+ result_backend_url: str = ""
+
+ # Caching
+ cache_backend: str = "redis" # redis, memcached, memory
+ cache_url: str = "redis://localhost:6379/0"
+ cache_ttl_seconds: int = 3600
+ cache_max_size_mb: int = 1024
+
+ # External APIs
+ model_registry_url: str = ""
+ experiment_tracking_url: str = ""
+ feature_store_url: str = ""
+
+ # Kubernetes integration
+ kubernetes_namespace: str = "default"
+ kubernetes_service_account: str = ""
+ enable_kubernetes_discovery: bool = False
+
+
+@dataclass
+class EnterpriseConfig:
+ """Main enterprise configuration."""
+ # Basic inference config
+ inference: InferenceConfig = field(default_factory=InferenceConfig)
+
+ # Enterprise features
+ auth: AuthConfig = field(default_factory=AuthConfig)
+ rbac: RBACConfig = field(default_factory=RBACConfig)
+ security: SecurityConfig = field(default_factory=SecurityConfig)
+ monitoring: MonitoringConfig = field(default_factory=MonitoringConfig)
+ compliance: ComplianceConfig = field(default_factory=ComplianceConfig)
+ scaling: ScalingConfig = field(default_factory=ScalingConfig)
+ integration: IntegrationConfig = field(default_factory=IntegrationConfig)
+
+ # Deployment settings
+ environment: str = "production" # development, staging, production
+ tenant_id: Optional[str] = None
+ deployment_id: str = ""
+ version: str = "1.0.0"
+
+ @classmethod
+ def from_env(cls) -> "EnterpriseConfig":
+ """Create configuration from environment variables."""
+ config = cls()
+
+ # Basic settings
+ config.environment = os.getenv("ENVIRONMENT", "production")
+ config.tenant_id = os.getenv("TENANT_ID")
+ config.deployment_id = os.getenv("DEPLOYMENT_ID", "")
+
+ # Authentication
+ config.auth.secret_key = os.getenv("JWT_SECRET_KEY", "")
+ config.auth.oauth2_client_id = os.getenv("OAUTH2_CLIENT_ID", "")
+ config.auth.oauth2_client_secret = os.getenv("OAUTH2_CLIENT_SECRET", "")
+
+ # Security
+ config.security.enable_encryption_at_rest = os.getenv("ENABLE_ENCRYPTION_AT_REST", "true").lower() == "true"
+ config.security.rate_limit_requests_per_minute = int(os.getenv("RATE_LIMIT_RPM", "100"))
+
+ # Monitoring
+ config.monitoring.jaeger_endpoint = os.getenv("JAEGER_ENDPOINT", "")
+ config.monitoring.metrics_port = int(os.getenv("METRICS_PORT", "9090"))
+
+ # Integration
+ config.integration.database_url = os.getenv("DATABASE_URL", "")
+ config.integration.cache_url = os.getenv("CACHE_URL", "redis://localhost:6379/0")
+ config.integration.message_broker_url = os.getenv("MESSAGE_BROKER_URL", "")
+
+ return config
+
+ def validate(self) -> None:
+ """Validate enterprise configuration."""
+ # Validate authentication
+ if self.auth.provider != AuthProvider.JWT and not self.auth.secret_key:
+ raise ValueError("Secret key is required for authentication")
+
+ # Validate security
+ if self.security.enable_encryption_at_rest and not self.security.encryption_algorithm:
+ raise ValueError("Encryption algorithm must be specified")
+
+ # Validate monitoring
+ if self.monitoring.enable_tracing and not (self.monitoring.jaeger_endpoint or self.monitoring.zipkin_endpoint):
+ raise ValueError("Tracing endpoint must be configured when tracing is enabled")
+
+ # Validate RBAC
+ if self.rbac.enable_rbac and not self.rbac.roles:
+ raise ValueError("Roles must be defined when RBAC is enabled")
+
+ # Validate compliance
+ for standard in self.compliance.enabled_standards:
+ if standard == ComplianceStandard.GDPR:
+ if not self.compliance.gdpr_consent_required:
+ raise ValueError("GDPR compliance requires consent management")
+
+ def get_secrets(self) -> Dict[str, str]:
+ """Get sensitive configuration values."""
+ secrets = {}
+
+ if self.security.secrets_provider == "env":
+ secrets.update({
+ "jwt_secret_key": self.auth.secret_key,
+ "oauth2_client_secret": self.auth.oauth2_client_secret,
+ "database_url": self.integration.database_url,
+ "cache_url": self.integration.cache_url
+ })
+
+ return secrets
+
+ def export_for_deployment(self) -> Dict[str, Any]:
+ """Export configuration for deployment (excluding secrets)."""
+ config_dict = {
+ "environment": self.environment,
+ "tenant_id": self.tenant_id,
+ "deployment_id": self.deployment_id,
+ "version": self.version,
+ "auth": {
+ "provider": self.auth.provider.value,
+ "algorithm": self.auth.algorithm,
+ "access_token_expire_minutes": self.auth.access_token_expire_minutes,
+ "enable_mfa": self.auth.enable_mfa,
+ "enable_api_keys": self.auth.enable_api_keys
+ },
+ "security": {
+ "enable_encryption_at_rest": self.security.enable_encryption_at_rest,
+ "enable_rate_limiting": self.security.enable_rate_limiting,
+ "rate_limit_requests_per_minute": self.security.rate_limit_requests_per_minute,
+ "enable_audit_logging": self.security.enable_audit_logging
+ },
+ "monitoring": {
+ "enable_metrics": self.monitoring.enable_metrics,
+ "enable_tracing": self.monitoring.enable_tracing,
+ "metrics_port": self.monitoring.metrics_port,
+ "log_level": self.monitoring.log_level
+ },
+ "scaling": {
+ "enable_auto_scaling": self.scaling.enable_auto_scaling,
+ "min_replicas": self.scaling.min_replicas,
+ "max_replicas": self.scaling.max_replicas
+ }
+ }
+
+ return config_dict
+
+
+# Factory functions for common configurations
+def create_development_config() -> EnterpriseConfig:
+ """Create configuration for development environment."""
+ config = EnterpriseConfig()
+ config.environment = "development"
+ config.auth.access_token_expire_minutes = 480 # 8 hours
+ config.security.enable_rate_limiting = False
+ config.security.enable_audit_logging = False
+ config.monitoring.log_level = "DEBUG"
+ config.scaling.enable_auto_scaling = False
+ return config
+
+
+def create_staging_config() -> EnterpriseConfig:
+ """Create configuration for staging environment."""
+ config = EnterpriseConfig()
+ config.environment = "staging"
+ config.security.rate_limit_requests_per_minute = 500
+ config.monitoring.tracing_sampling_rate = 0.5
+ config.scaling.max_replicas = 5
+ return config
+
+
+def create_production_config() -> EnterpriseConfig:
+ """Create configuration for production environment."""
+ config = EnterpriseConfig()
+ config.environment = "production"
+ config.auth.enable_mfa = True
+ config.security.enable_encryption_at_rest = True
+ config.security.enable_audit_logging = True
+ config.compliance.enabled_standards = [ComplianceStandard.GDPR]
+ config.monitoring.enable_alerting = True
+ config.scaling.enable_auto_scaling = True
+ return config
diff --git a/framework/enterprise/engine.py b/framework/enterprise/engine.py
new file mode 100644
index 0000000..66485af
--- /dev/null
+++ b/framework/enterprise/engine.py
@@ -0,0 +1,598 @@
+"""
+Enterprise inference engine with comprehensive enterprise features.
+
+This module integrates all enterprise components to provide:
+- Secure authentication and authorization
+- Advanced monitoring and observability
+- Model governance and MLOps
+- High availability and scalability
+- Compliance and audit capabilities
+"""
+
+import asyncio
+import time
+from datetime import datetime, timezone
+from typing import Dict, List, Optional, Any, Union, Tuple
+from dataclasses import dataclass
+import logging
+from contextlib import asynccontextmanager
+
+from ..core.inference_engine import InferenceEngine
+from ..core.base_model import BaseModel
+from .config import EnterpriseConfig
+from .auth import EnterpriseAuth, User, Permission
+from .security import SecurityManager, SecurityEvent
+from .monitoring import EnterpriseMonitor
+from .governance import ModelGovernance, ModelPerformanceMetrics
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class EnterpriseInferenceRequest:
+ """Enterprise inference request with security context."""
+ id: str
+ inputs: Any
+ user_id: Optional[str] = None
+ tenant_id: Optional[str] = None
+ session_id: Optional[str] = None
+ ip_address: Optional[str] = None
+ user_agent: Optional[str] = None
+ trace_id: Optional[str] = None
+ permissions: List[str] = None
+ priority: int = 0
+ timeout: Optional[float] = None
+
+ def __post_init__(self):
+ if self.permissions is None:
+ self.permissions = []
+
+
+@dataclass
+class EnterpriseInferenceResponse:
+ """Enterprise inference response with metadata."""
+ request_id: str
+ result: Any
+ model_id: str
+ model_version: str
+ processing_time_ms: float
+ timestamp: datetime
+ trace_id: Optional[str] = None
+ compliance_metadata: Dict[str, Any] = None
+
+ def __post_init__(self):
+ if self.compliance_metadata is None:
+ self.compliance_metadata = {}
+
+
+class EnterpriseInferenceEngine:
+ """
+ Enterprise-grade inference engine with comprehensive security, monitoring,
+ and governance features.
+ """
+
+ def __init__(self, model: BaseModel, config: EnterpriseConfig):
+ self.model = model
+ self.config = config
+
+ # Initialize base inference engine
+ self.base_engine = InferenceEngine(model, config.inference)
+
+ # Initialize enterprise components
+ self.auth = EnterpriseAuth(config)
+ self.security_manager = SecurityManager(config)
+ self.monitor = EnterpriseMonitor(config)
+ self.governance = ModelGovernance(config)
+
+ # Performance tracking
+ self.request_count = 0
+ self.error_count = 0
+ self.start_time = time.time()
+
+ # Setup integration
+ self._setup_integration()
+
+ logger.info("Enterprise inference engine initialized")
+
+ def _setup_integration(self) -> None:
+ """Setup integration between components."""
+ # Add security alert callbacks
+ self.security_manager.add_alert_callback(self._handle_security_alert)
+
+ # Add monitoring alert callbacks
+ if self.monitor.alert_manager:
+ self.monitor.alert_manager.add_alert_callback(self._handle_monitoring_alert)
+
+ # Setup audit logging for model access
+ self._setup_audit_logging()
+
+ def _setup_audit_logging(self) -> None:
+ """Setup audit logging for compliance."""
+ # This would typically integrate with external audit systems
+ logger.info("Audit logging configured")
+
+ def _handle_security_alert(self, alert) -> None:
+ """Handle security alerts."""
+ logger.warning(f"Security Alert: {alert.message}")
+
+ # In production, this would:
+ # - Send notifications to security team
+ # - Update threat intelligence
+ # - Trigger automated responses
+
+ def _handle_monitoring_alert(self, alert) -> None:
+ """Handle monitoring alerts."""
+ logger.warning(f"Monitoring Alert: {alert.message}")
+
+ # In production, this would:
+ # - Send notifications to operations team
+ # - Trigger auto-scaling if needed
+ # - Update dashboards
+
+ async def start(self) -> None:
+ """Start the enterprise inference engine."""
+ logger.info("Starting enterprise inference engine...")
+
+ # Start base engine
+ await self.base_engine.start()
+
+ # Start monitoring
+ self.monitor.start_monitoring()
+
+ logger.info("Enterprise inference engine started successfully")
+
+ async def stop(self) -> None:
+ """Stop the enterprise inference engine."""
+ logger.info("Stopping enterprise inference engine...")
+
+ # Stop monitoring
+ self.monitor.stop_monitoring()
+
+ # Stop base engine
+ await self.base_engine.stop()
+
+ logger.info("Enterprise inference engine stopped")
+
+ async def predict(self, request: EnterpriseInferenceRequest) -> EnterpriseInferenceResponse:
+ """
+ Perform secure, monitored inference with full enterprise features.
+
+ Args:
+ request: Enterprise inference request with security context
+
+ Returns:
+ Enterprise inference response with metadata
+
+ Raises:
+ PermissionError: If user lacks required permissions
+ SecurityError: If security validation fails
+ ValidationError: If input validation fails
+ """
+ start_time = time.time()
+ self.request_count += 1
+
+ # Create trace context
+ if self.monitor.distributed_tracing:
+ trace_context = self.monitor.distributed_tracing.get_current_trace_context()
+ if trace_context:
+ request.trace_id = trace_context.trace_id
+
+ try:
+ # 1. Authentication & Authorization
+ await self._authenticate_and_authorize(request)
+
+ # 2. Security validation
+ await self._validate_security(request)
+
+ # 3. Input validation and sanitization
+ validated_inputs = await self._validate_and_sanitize_inputs(request)
+
+ # 4. Rate limiting check
+ await self._check_rate_limits(request)
+
+ # 5. Model selection and routing (for A/B tests)
+ model_info = await self._select_model(request)
+
+ # 6. Perform inference
+ result = await self._perform_inference(validated_inputs, request)
+
+ # 7. Post-process and validate outputs
+ processed_result = await self._process_outputs(result, request)
+
+ # 8. Record metrics and audit logs
+ processing_time = (time.time() - start_time) * 1000
+ await self._record_success_metrics(request, processing_time, model_info)
+
+ # 9. Create enterprise response
+ response = EnterpriseInferenceResponse(
+ request_id=request.id,
+ result=processed_result,
+ model_id=model_info.get("id", "unknown"),
+ model_version=model_info.get("version", "unknown"),
+ processing_time_ms=processing_time,
+ timestamp=datetime.now(timezone.utc),
+ trace_id=request.trace_id,
+ compliance_metadata=self._create_compliance_metadata(request)
+ )
+
+ return response
+
+ except Exception as e:
+ # Record error metrics
+ self.error_count += 1
+ processing_time = (time.time() - start_time) * 1000
+ await self._record_error_metrics(request, str(e), processing_time)
+
+ # Log security event if relevant
+ if isinstance(e, PermissionError):
+ self.security_manager.log_security_event(
+ SecurityEvent.AUTHORIZATION_ERROR,
+ request.user_id,
+ f"Permission denied: {e}",
+ {"request_id": request.id, "permission_required": str(e)},
+ request.ip_address
+ )
+
+ raise
+
+ async def predict_batch(self, requests: List[EnterpriseInferenceRequest]) -> List[EnterpriseInferenceResponse]:
+ """Perform batch inference with enterprise features."""
+ if not requests:
+ return []
+
+ # Process requests concurrently while respecting rate limits
+ semaphore = asyncio.Semaphore(self.config.scaling.max_replicas)
+
+ async def process_single_request(req):
+ async with semaphore:
+ return await self.predict(req)
+
+ tasks = [process_single_request(req) for req in requests]
+ responses = await asyncio.gather(*tasks, return_exceptions=True)
+
+ # Filter out exceptions and log them
+ valid_responses = []
+ for i, response in enumerate(responses):
+ if isinstance(response, Exception):
+ logger.error(f"Batch request {requests[i].id} failed: {response}")
+ # Could create error response here
+ else:
+ valid_responses.append(response)
+
+ return valid_responses
+
+ async def _authenticate_and_authorize(self, request: EnterpriseInferenceRequest) -> User:
+ """Authenticate user and check permissions."""
+ if not request.user_id and not request.session_id:
+ raise PermissionError("Authentication required")
+
+ # Get user from session or user_id
+ user = None
+ if request.session_id:
+ session = self.auth.session_manager.get_session(request.session_id)
+ if not session or not session.is_valid():
+ raise PermissionError("Invalid or expired session")
+ user = self.auth.rbac_manager.get_user(session.user_id)
+ elif request.user_id:
+ user = self.auth.rbac_manager.get_user(request.user_id)
+
+ if not user or not user.is_active:
+ raise PermissionError("User not found or inactive")
+
+ # Check inference permission
+ if not user.has_permission(Permission.INFERENCE_PREDICT, self.auth.rbac_manager):
+ raise PermissionError("Insufficient permissions for inference")
+
+ # Check tenant isolation
+ if (self.config.rbac.tenant_isolation and request.tenant_id and
+ user.tenant_id != request.tenant_id):
+ raise PermissionError("Tenant access denied")
+
+ # Update request with user permissions
+ request.permissions = self.auth.rbac_manager._get_user_permissions(user.id)
+
+ return user
+
+ async def _validate_security(self, request: EnterpriseInferenceRequest) -> None:
+ """Perform security validation."""
+ # Validate request against security policies
+ client_id = request.user_id or request.ip_address or "anonymous"
+ endpoint = "inference"
+
+ is_valid, error_msg = self.security_manager.validate_request(
+ client_id, request.inputs, endpoint
+ )
+
+ if not is_valid:
+ raise ValueError(f"Security validation failed: {error_msg}")
+
+ async def _validate_and_sanitize_inputs(self, request: EnterpriseInferenceRequest) -> Any:
+ """Validate and sanitize input data."""
+ # Input validation based on model requirements
+ if hasattr(self.model, 'validate_input'):
+ is_valid, error_msg = self.model.validate_input(request.inputs)
+ if not is_valid:
+ raise ValueError(f"Input validation failed: {error_msg}")
+
+ # Security-based input sanitization
+ if hasattr(request.inputs, 'items'): # Dictionary-like
+ sanitized = {}
+ for key, value in request.inputs.items():
+ if isinstance(value, str):
+ sanitized[key] = self.security_manager.input_validator.sanitize_input(value)
+ else:
+ sanitized[key] = value
+ return sanitized
+
+ return request.inputs
+
+ async def _check_rate_limits(self, request: EnterpriseInferenceRequest) -> None:
+ """Check rate limiting."""
+ client_id = request.user_id or request.ip_address or "anonymous"
+
+ allowed, rate_info = self.security_manager.rate_limiter.is_allowed(client_id, "inference")
+
+ if not allowed:
+ raise RuntimeError(f"Rate limit exceeded. Retry after {rate_info['retry_after']} seconds")
+
+ async def _select_model(self, request: EnterpriseInferenceRequest) -> Dict[str, Any]:
+ """Select model for inference (handles A/B testing)."""
+ # In production, this would check for active A/B tests
+ # and route requests accordingly
+
+ return {
+ "id": self.model.model_info.get("id", "default"),
+ "version": self.model.model_info.get("version", "1.0.0"),
+ "name": self.model.model_info.get("name", "default_model")
+ }
+
+ async def _perform_inference(self, inputs: Any, request: EnterpriseInferenceRequest) -> Any:
+ """Perform the actual inference."""
+ # Add tracing information
+ if self.monitor.distributed_tracing:
+ span = self.monitor.distributed_tracing.create_span("model_inference")
+ span.set_attribute("user_id", request.user_id or "anonymous")
+ span.set_attribute("model_id", self.model.model_info.get("id", "unknown"))
+
+ # Perform inference using base engine
+ result = await self.base_engine.predict(inputs, request.priority, request.timeout)
+
+ return result
+
+ async def _process_outputs(self, result: Any, request: EnterpriseInferenceRequest) -> Any:
+ """Post-process and validate outputs."""
+ # Output sanitization for security
+ if self.config.security.sanitize_outputs:
+ # Apply output sanitization rules
+ if isinstance(result, dict):
+ sanitized = {}
+ for key, value in result.items():
+ if isinstance(value, str):
+ sanitized[key] = self.security_manager.input_validator.sanitize_input(value)
+ else:
+ sanitized[key] = value
+ return sanitized
+
+ return result
+
+ async def _record_success_metrics(self, request: EnterpriseInferenceRequest,
+ processing_time: float, model_info: Dict[str, Any]) -> None:
+ """Record successful inference metrics."""
+ # Monitor HTTP-style request
+ self.monitor.record_request(
+ method="POST",
+ endpoint="/predict",
+ status="200",
+ duration=processing_time / 1000, # Convert to seconds
+ user_id=request.user_id
+ )
+
+ # Monitor inference-specific metrics
+ self.monitor.record_inference(
+ model=model_info["id"],
+ duration=processing_time / 1000,
+ status="success",
+ tenant=request.tenant_id or "default"
+ )
+
+ # Record model performance for governance
+ performance_metrics = ModelPerformanceMetrics(
+ model_id=model_info["id"],
+ version=model_info["version"],
+ timestamp=datetime.now(timezone.utc),
+ latency_p95_ms=processing_time, # Single request latency
+ throughput_rps=1.0 / (processing_time / 1000),
+ prediction_count=1,
+ error_count=0
+ )
+
+ self.governance.record_model_performance(
+ model_info["id"],
+ model_info["version"],
+ performance_metrics
+ )
+
+ # Audit logging
+ self.security_manager.audit_logger.log_action(
+ user_id=request.user_id,
+ action="inference_request",
+ resource=f"model:{model_info['id']}",
+ details={
+ "model_version": model_info["version"],
+ "processing_time_ms": processing_time,
+ "input_size": len(str(request.inputs)) if request.inputs else 0
+ },
+ ip_address=request.ip_address,
+ user_agent=request.user_agent,
+ tenant_id=request.tenant_id
+ )
+
+ async def _record_error_metrics(self, request: EnterpriseInferenceRequest,
+ error: str, processing_time: float) -> None:
+ """Record error metrics."""
+ # Monitor HTTP-style error
+ self.monitor.record_request(
+ method="POST",
+ endpoint="/predict",
+ status="500",
+ duration=processing_time / 1000,
+ user_id=request.user_id
+ )
+
+ # Monitor inference error
+ model_info = await self._select_model(request)
+ self.monitor.record_inference(
+ model=model_info["id"],
+ duration=processing_time / 1000,
+ status="error",
+ tenant=request.tenant_id or "default"
+ )
+
+ # Record error in governance
+ performance_metrics = ModelPerformanceMetrics(
+ model_id=model_info["id"],
+ version=model_info["version"],
+ timestamp=datetime.now(timezone.utc),
+ latency_p95_ms=processing_time,
+ prediction_count=0,
+ error_count=1
+ )
+
+ self.governance.record_model_performance(
+ model_info["id"],
+ model_info["version"],
+ performance_metrics
+ )
+
+ # Audit logging
+ self.security_manager.audit_logger.log_action(
+ user_id=request.user_id,
+ action="inference_error",
+ resource=f"model:{model_info['id']}",
+ details={
+ "error": error,
+ "processing_time_ms": processing_time
+ },
+ ip_address=request.ip_address,
+ tenant_id=request.tenant_id,
+ success=False
+ )
+
+ def _create_compliance_metadata(self, request: EnterpriseInferenceRequest) -> Dict[str, Any]:
+ """Create compliance metadata for response."""
+ metadata = {
+ "data_retention_policy": "standard",
+ "processing_purpose": "ml_inference",
+ "user_consent": True, # Would check actual consent in production
+ }
+
+ # Add GDPR-specific metadata if enabled
+ if any(std.value == "gdpr" for std in self.config.compliance.enabled_standards):
+ metadata.update({
+ "gdpr_lawful_basis": "legitimate_interest",
+ "data_controller": "torch_inference_service",
+ "retention_period_days": self.config.compliance.gdpr_data_retention_days
+ })
+
+ return metadata
+
+ async def health_check(self) -> Dict[str, Any]:
+ """Comprehensive health check."""
+ health_status = {
+ "status": "healthy",
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "uptime_seconds": time.time() - self.start_time,
+ "components": {},
+ "metrics": {},
+ "version": self.config.version
+ }
+
+ # Check base engine health
+ base_health = await self.base_engine.health_check()
+ health_status["components"]["inference_engine"] = base_health
+
+ # Check enterprise components
+ health_status["components"]["monitoring"] = self.monitor.get_health_status()
+ health_status["components"]["security"] = self.security_manager.get_security_metrics()
+
+ # Overall metrics
+ health_status["metrics"] = {
+ "total_requests": self.request_count,
+ "error_count": self.error_count,
+ "error_rate": self.error_count / max(self.request_count, 1),
+ "requests_per_second": self.request_count / (time.time() - self.start_time)
+ }
+
+ # Determine overall health
+ if health_status["metrics"]["error_rate"] > 0.1: # >10% error rate
+ health_status["status"] = "unhealthy"
+ elif any(not comp.get("healthy", True) for comp in health_status["components"].values()):
+ health_status["status"] = "degraded"
+
+ return health_status
+
+ def get_enterprise_dashboard_data(self) -> Dict[str, Any]:
+ """Get comprehensive dashboard data."""
+ return {
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "system_health": self.monitor.get_health_status(),
+ "security_metrics": self.security_manager.get_security_metrics(),
+ "monitoring_data": self.monitor.get_monitoring_dashboard_data(),
+ "governance_data": self.governance.get_governance_dashboard(),
+ "performance_metrics": {
+ "total_requests": self.request_count,
+ "error_count": self.error_count,
+ "uptime_hours": (time.time() - self.start_time) / 3600
+ }
+ }
+
+ def get_compliance_report(self) -> Dict[str, Any]:
+ """Generate compliance report."""
+ return {
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "compliance_standards": [std.value for std in self.config.compliance.enabled_standards],
+ "audit_summary": {
+ "total_audit_entries": len(self.security_manager.audit_logger.log_entries),
+ "security_events": len([
+ entry for entry in self.security_manager.audit_logger.log_entries
+ if "security" in entry.action
+ ]),
+ "model_accesses": len([
+ entry for entry in self.security_manager.audit_logger.log_entries
+ if entry.action == "inference_request"
+ ])
+ },
+ "data_governance": {
+ "models_under_governance": len(self.governance.model_registry.list_models()),
+ "active_experiments": len([
+ exp for exp in self.governance.experiment_tracker.experiments.values()
+ if exp.status.value == "running"
+ ])
+ },
+ "security_posture": self.security_manager.get_security_metrics()
+ }
+
+ @asynccontextmanager
+ async def enterprise_context(self):
+ """Context manager for enterprise engine lifecycle."""
+ await self.start()
+ try:
+ yield self
+ finally:
+ await self.stop()
+
+
+# Factory functions for creating enterprise engines
+async def create_enterprise_engine(model: BaseModel, config: EnterpriseConfig) -> EnterpriseInferenceEngine:
+ """Create and initialize enterprise inference engine."""
+ engine = EnterpriseInferenceEngine(model, config)
+ await engine.start()
+ return engine
+
+
+def create_enterprise_config_from_env() -> EnterpriseConfig:
+ """Create enterprise configuration from environment variables."""
+ config = EnterpriseConfig.from_env()
+ config.validate()
+ return config
diff --git a/framework/enterprise/governance.py b/framework/enterprise/governance.py
new file mode 100644
index 0000000..ba5d567
--- /dev/null
+++ b/framework/enterprise/governance.py
@@ -0,0 +1,1218 @@
+"""
+Model governance and MLOps management system.
+
+This module provides comprehensive MLOps capabilities including:
+- Model versioning and lifecycle management
+- A/B testing and canary deployments
+- Model performance monitoring
+- Experiment tracking
+- Model registry integration
+- Automated validation and testing
+"""
+
+import json
+import hashlib
+import time
+import asyncio
+from datetime import datetime, timedelta, timezone
+from typing import Dict, List, Optional, Any, Union, Tuple, Callable
+from dataclasses import dataclass, field, asdict
+from enum import Enum
+import logging
+import secrets
+from pathlib import Path
+from abc import ABC, abstractmethod
+import threading
+from collections import defaultdict, deque
+
+from .config import EnterpriseConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+class ModelStatus(Enum):
+ """Model deployment status."""
+ PENDING = "pending"
+ VALIDATING = "validating"
+ ACTIVE = "active"
+ DEPRECATED = "deprecated"
+ FAILED = "failed"
+ ARCHIVED = "archived"
+
+
+class DeploymentStrategy(Enum):
+ """Model deployment strategies."""
+ BLUE_GREEN = "blue_green"
+ CANARY = "canary"
+ ROLLING = "rolling"
+ A_B_TEST = "a_b_test"
+
+
+class ExperimentStatus(Enum):
+ """Experiment status."""
+ DRAFT = "draft"
+ RUNNING = "running"
+ COMPLETED = "completed"
+ FAILED = "failed"
+ CANCELLED = "cancelled"
+
+
+@dataclass
+class ModelMetadata:
+ """Model metadata and information."""
+ id: str
+ name: str
+ version: str
+ description: str
+ framework: str # pytorch, tensorflow, onnx
+ architecture: str
+ input_shape: Tuple[int, ...]
+ output_shape: Tuple[int, ...]
+ parameters_count: int
+ model_size_mb: float
+
+ # Training information
+ training_dataset: Optional[str] = None
+ training_accuracy: Optional[float] = None
+ validation_accuracy: Optional[float] = None
+ training_duration_hours: Optional[float] = None
+
+ # Deployment information
+ created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_by: Optional[str] = None
+ tags: List[str] = field(default_factory=list)
+
+ # Performance benchmarks
+ inference_time_ms: Optional[float] = None
+ throughput_rps: Optional[float] = None
+ memory_usage_mb: Optional[float] = None
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary."""
+ data = asdict(self)
+ data["created_at"] = self.created_at.isoformat()
+ return data
+
+
+@dataclass
+class ModelVersion:
+ """Model version information."""
+ model_id: str
+ version: str
+ status: ModelStatus
+ file_path: str
+ checksum: str
+ metadata: ModelMetadata
+
+ # Deployment information
+ deployed_at: Optional[datetime] = None
+ deployment_config: Dict[str, Any] = field(default_factory=dict)
+
+ # Performance metrics
+ performance_metrics: Dict[str, float] = field(default_factory=dict)
+
+ # Validation results
+ validation_results: Dict[str, Any] = field(default_factory=dict)
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary."""
+ data = asdict(self)
+ data["status"] = self.status.value
+ data["deployed_at"] = self.deployed_at.isoformat() if self.deployed_at else None
+ return data
+
+
+@dataclass
+class Experiment:
+ """ML experiment tracking."""
+ id: str
+ name: str
+ description: str
+ status: ExperimentStatus
+ model_id: str
+
+ # Configuration
+ hyperparameters: Dict[str, Any] = field(default_factory=dict)
+ configuration: Dict[str, Any] = field(default_factory=dict)
+
+ # Metrics
+ metrics: Dict[str, float] = field(default_factory=dict)
+ artifacts: List[str] = field(default_factory=list)
+
+ # Timing
+ started_at: Optional[datetime] = None
+ completed_at: Optional[datetime] = None
+ created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_by: Optional[str] = None
+
+ # Results
+ results: Dict[str, Any] = field(default_factory=dict)
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary."""
+ data = asdict(self)
+ data["status"] = self.status.value
+ data["started_at"] = self.started_at.isoformat() if self.started_at else None
+ data["completed_at"] = self.completed_at.isoformat() if self.completed_at else None
+ data["created_at"] = self.created_at.isoformat()
+ return data
+
+
+@dataclass
+class ABTestConfig:
+ """A/B test configuration."""
+ id: str
+ name: str
+ model_a_id: str
+ model_b_id: str
+ traffic_split_percent: int # Percentage for model B (0-100)
+
+ # Test criteria
+ success_metrics: List[str] = field(default_factory=list)
+ min_sample_size: int = 1000
+ confidence_level: float = 0.95
+ test_duration_hours: int = 24
+
+ # Status
+ started_at: Optional[datetime] = None
+ status: str = "draft" # draft, running, completed, cancelled
+
+ # Results
+ results: Dict[str, Any] = field(default_factory=dict)
+
+
+@dataclass
+class ModelPerformanceMetrics:
+ """Model performance tracking."""
+ model_id: str
+ version: str
+ timestamp: datetime
+
+ # Accuracy metrics
+ accuracy: Optional[float] = None
+ precision: Optional[float] = None
+ recall: Optional[float] = None
+ f1_score: Optional[float] = None
+
+ # Performance metrics
+ latency_p50_ms: Optional[float] = None
+ latency_p95_ms: Optional[float] = None
+ latency_p99_ms: Optional[float] = None
+ throughput_rps: Optional[float] = None
+
+ # Resource metrics
+ cpu_usage_percent: Optional[float] = None
+ memory_usage_mb: Optional[float] = None
+ gpu_utilization_percent: Optional[float] = None
+
+ # Business metrics
+ prediction_count: int = 0
+ error_count: int = 0
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert to dictionary."""
+ data = asdict(self)
+ data["timestamp"] = self.timestamp.isoformat()
+ return data
+
+
+class ModelRegistry:
+ """Model registry for version management."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.models: Dict[str, Dict[str, ModelVersion]] = {} # model_id -> version -> ModelVersion
+ self.metadata_cache: Dict[str, ModelMetadata] = {}
+
+ # Model storage path
+ self.storage_path = Path("models")
+ self.storage_path.mkdir(exist_ok=True)
+
+ def register_model(self, metadata: ModelMetadata, model_file_path: str) -> ModelVersion:
+ """Register new model version."""
+ # Calculate checksum
+ checksum = self._calculate_file_checksum(model_file_path)
+
+ # Create model version
+ model_version = ModelVersion(
+ model_id=metadata.id,
+ version=metadata.version,
+ status=ModelStatus.PENDING,
+ file_path=model_file_path,
+ checksum=checksum,
+ metadata=metadata
+ )
+
+ # Store in registry
+ if metadata.id not in self.models:
+ self.models[metadata.id] = {}
+
+ self.models[metadata.id][metadata.version] = model_version
+ self.metadata_cache[f"{metadata.id}:{metadata.version}"] = metadata
+
+ logger.info(f"Registered model {metadata.id}:{metadata.version}")
+ return model_version
+
+ def get_model_version(self, model_id: str, version: str) -> Optional[ModelVersion]:
+ """Get specific model version."""
+ return self.models.get(model_id, {}).get(version)
+
+ def get_latest_version(self, model_id: str) -> Optional[ModelVersion]:
+ """Get latest version of model."""
+ if model_id not in self.models:
+ return None
+
+ versions = self.models[model_id]
+ if not versions:
+ return None
+
+ # Sort versions by created_at
+ latest = max(versions.values(), key=lambda v: v.metadata.created_at)
+ return latest
+
+ def get_active_version(self, model_id: str) -> Optional[ModelVersion]:
+ """Get currently active version of model."""
+ if model_id not in self.models:
+ return None
+
+ for version in self.models[model_id].values():
+ if version.status == ModelStatus.ACTIVE:
+ return version
+
+ return None
+
+ def list_models(self) -> List[str]:
+ """List all registered models."""
+ return list(self.models.keys())
+
+ def list_versions(self, model_id: str) -> List[str]:
+ """List all versions of a model."""
+ return list(self.models.get(model_id, {}).keys())
+
+ def update_model_status(self, model_id: str, version: str, status: ModelStatus) -> bool:
+ """Update model status."""
+ model_version = self.get_model_version(model_id, version)
+ if model_version:
+ old_status = model_version.status
+ model_version.status = status
+
+ if status == ModelStatus.ACTIVE:
+ # Deactivate other versions
+ for v in self.models[model_id].values():
+ if v.version != version and v.status == ModelStatus.ACTIVE:
+ v.status = ModelStatus.DEPRECATED
+
+ model_version.deployed_at = datetime.now(timezone.utc)
+
+ logger.info(f"Updated model {model_id}:{version} status: {old_status.value} -> {status.value}")
+ return True
+
+ return False
+
+ def _calculate_file_checksum(self, file_path: str) -> str:
+ """Calculate SHA256 checksum of file."""
+ sha256_hash = hashlib.sha256()
+ with open(file_path, "rb") as f:
+ for chunk in iter(lambda: f.read(4096), b""):
+ sha256_hash.update(chunk)
+ return sha256_hash.hexdigest()
+
+ def search_models(self, tags: Optional[List[str]] = None,
+ framework: Optional[str] = None,
+ min_accuracy: Optional[float] = None) -> List[ModelMetadata]:
+ """Search models by criteria."""
+ results = []
+
+ for model_versions in self.models.values():
+ for version in model_versions.values():
+ metadata = version.metadata
+
+ # Filter by tags
+ if tags and not any(tag in metadata.tags for tag in tags):
+ continue
+
+ # Filter by framework
+ if framework and metadata.framework.lower() != framework.lower():
+ continue
+
+ # Filter by accuracy
+ if (min_accuracy and metadata.training_accuracy and
+ metadata.training_accuracy < min_accuracy):
+ continue
+
+ results.append(metadata)
+
+ return results
+
+
+class ModelValidator:
+ """Model validation and testing."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.validation_callbacks: List[Callable] = []
+
+ def add_validation_callback(self, callback: Callable) -> None:
+ """Add custom validation callback."""
+ self.validation_callbacks.append(callback)
+
+ async def validate_model(self, model_version: ModelVersion) -> Dict[str, Any]:
+ """Validate model before deployment."""
+ validation_results = {
+ "model_id": model_version.model_id,
+ "version": model_version.version,
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "validations": {},
+ "overall_status": "passed",
+ "issues": []
+ }
+
+ # File integrity check
+ file_check = self._validate_file_integrity(model_version)
+ validation_results["validations"]["file_integrity"] = file_check
+
+ # Model format validation
+ format_check = self._validate_model_format(model_version)
+ validation_results["validations"]["model_format"] = format_check
+
+ # Performance benchmarks
+ performance_check = await self._validate_performance(model_version)
+ validation_results["validations"]["performance"] = performance_check
+
+ # Custom validations
+ for callback in self.validation_callbacks:
+ try:
+ custom_result = await callback(model_version)
+ validation_results["validations"][f"custom_{callback.__name__}"] = custom_result
+ except Exception as e:
+ logger.error(f"Custom validation failed: {e}")
+ validation_results["validations"][f"custom_{callback.__name__}"] = {
+ "passed": False,
+ "error": str(e)
+ }
+
+ # Determine overall status
+ failed_validations = [
+ name for name, result in validation_results["validations"].items()
+ if not result.get("passed", False)
+ ]
+
+ if failed_validations:
+ validation_results["overall_status"] = "failed"
+ validation_results["issues"] = failed_validations
+
+ # Update model version with validation results
+ model_version.validation_results = validation_results
+
+ return validation_results
+
+ def _validate_file_integrity(self, model_version: ModelVersion) -> Dict[str, Any]:
+ """Validate file exists and checksum matches."""
+ try:
+ file_path = Path(model_version.file_path)
+
+ if not file_path.exists():
+ return {
+ "passed": False,
+ "message": "Model file does not exist"
+ }
+
+ # Verify checksum
+ sha256_hash = hashlib.sha256()
+ with open(file_path, "rb") as f:
+ for chunk in iter(lambda: f.read(4096), b""):
+ sha256_hash.update(chunk)
+
+ actual_checksum = sha256_hash.hexdigest()
+ if actual_checksum != model_version.checksum:
+ return {
+ "passed": False,
+ "message": "Checksum mismatch",
+ "expected": model_version.checksum,
+ "actual": actual_checksum
+ }
+
+ return {
+ "passed": True,
+ "message": "File integrity validated",
+ "file_size_mb": file_path.stat().st_size / (1024 * 1024)
+ }
+
+ except Exception as e:
+ return {
+ "passed": False,
+ "message": f"File validation error: {e}"
+ }
+
+ def _validate_model_format(self, model_version: ModelVersion) -> Dict[str, Any]:
+ """Validate model can be loaded."""
+ try:
+ metadata = model_version.metadata
+
+ if metadata.framework.lower() == "pytorch":
+ import torch
+ model = torch.load(model_version.file_path, map_location='cpu')
+ return {
+ "passed": True,
+ "message": "PyTorch model loaded successfully",
+ "model_type": str(type(model))
+ }
+
+ elif metadata.framework.lower() == "onnx":
+ import onnx
+ model = onnx.load(model_version.file_path)
+ onnx.checker.check_model(model)
+ return {
+ "passed": True,
+ "message": "ONNX model validated successfully"
+ }
+
+ else:
+ return {
+ "passed": True,
+ "message": f"Format validation skipped for {metadata.framework}"
+ }
+
+ except Exception as e:
+ return {
+ "passed": False,
+ "message": f"Model format validation failed: {e}"
+ }
+
+ async def _validate_performance(self, model_version: ModelVersion) -> Dict[str, Any]:
+ """Validate model performance benchmarks."""
+ try:
+ # This would run actual performance tests
+ # For now, we'll simulate the validation
+
+ metadata = model_version.metadata
+
+ # Check if model size is reasonable
+ if metadata.model_size_mb > 1000: # > 1GB
+ return {
+ "passed": False,
+ "message": f"Model too large: {metadata.model_size_mb:.1f}MB",
+ "threshold": "1000MB"
+ }
+
+ # Check parameter count
+ if metadata.parameters_count > 1e9: # > 1B parameters
+ return {
+ "passed": False,
+ "message": f"Too many parameters: {metadata.parameters_count:,}",
+ "threshold": "1,000,000,000"
+ }
+
+ return {
+ "passed": True,
+ "message": "Performance validation passed",
+ "model_size_mb": metadata.model_size_mb,
+ "parameters": metadata.parameters_count
+ }
+
+ except Exception as e:
+ return {
+ "passed": False,
+ "message": f"Performance validation error: {e}"
+ }
+
+
+class ABTestManager:
+ """A/B testing management."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.active_tests: Dict[str, ABTestConfig] = {}
+ self.test_results: Dict[str, Dict] = defaultdict(dict)
+ self.traffic_router = TrafficRouter()
+
+ def create_ab_test(self, name: str, model_a_id: str, model_b_id: str,
+ traffic_split_percent: int = 50,
+ success_metrics: List[str] = None) -> ABTestConfig:
+ """Create new A/B test."""
+ test_id = f"ab_test_{secrets.token_urlsafe(8)}"
+
+ test_config = ABTestConfig(
+ id=test_id,
+ name=name,
+ model_a_id=model_a_id,
+ model_b_id=model_b_id,
+ traffic_split_percent=traffic_split_percent,
+ success_metrics=success_metrics or ["accuracy", "latency"]
+ )
+
+ self.active_tests[test_id] = test_config
+ logger.info(f"Created A/B test {test_id}: {model_a_id} vs {model_b_id}")
+
+ return test_config
+
+ def start_ab_test(self, test_id: str) -> bool:
+ """Start A/B test."""
+ test_config = self.active_tests.get(test_id)
+ if not test_config:
+ return False
+
+ test_config.started_at = datetime.now(timezone.utc)
+ test_config.status = "running"
+
+ # Configure traffic routing
+ self.traffic_router.add_route(
+ test_id,
+ test_config.model_a_id,
+ test_config.model_b_id,
+ test_config.traffic_split_percent
+ )
+
+ logger.info(f"Started A/B test {test_id}")
+ return True
+
+ def record_test_result(self, test_id: str, model_id: str,
+ metrics: Dict[str, float]) -> None:
+ """Record test result for analysis."""
+ if test_id not in self.active_tests:
+ return
+
+ if model_id not in self.test_results[test_id]:
+ self.test_results[test_id][model_id] = {
+ "samples": [],
+ "metrics": defaultdict(list)
+ }
+
+ # Record metrics
+ result_data = self.test_results[test_id][model_id]
+ result_data["samples"].append({
+ "timestamp": datetime.now(timezone.utc),
+ "metrics": metrics
+ })
+
+ for metric_name, value in metrics.items():
+ result_data["metrics"][metric_name].append(value)
+
+ def analyze_test_results(self, test_id: str) -> Dict[str, Any]:
+ """Analyze A/B test results."""
+ test_config = self.active_tests.get(test_id)
+ if not test_config:
+ return {"error": "Test not found"}
+
+ test_data = self.test_results[test_id]
+ analysis = {
+ "test_id": test_id,
+ "test_name": test_config.name,
+ "status": test_config.status,
+ "models": {},
+ "statistical_significance": {},
+ "recommendation": "inconclusive"
+ }
+
+ # Analyze each model's performance
+ for model_id, data in test_data.items():
+ if not data["samples"]:
+ continue
+
+ model_analysis = {
+ "sample_count": len(data["samples"]),
+ "metrics": {}
+ }
+
+ for metric_name, values in data["metrics"].items():
+ if values:
+ model_analysis["metrics"][metric_name] = {
+ "mean": sum(values) / len(values),
+ "min": min(values),
+ "max": max(values),
+ "std": self._calculate_std(values)
+ }
+
+ analysis["models"][model_id] = model_analysis
+
+ # Statistical significance testing (simplified)
+ if len(analysis["models"]) == 2:
+ model_ids = list(analysis["models"].keys())
+ model_a_data = analysis["models"][model_ids[0]]
+ model_b_data = analysis["models"][model_ids[1]]
+
+ # Check if we have enough samples
+ min_samples = min(
+ model_a_data["sample_count"],
+ model_b_data["sample_count"]
+ )
+
+ if min_samples >= test_config.min_sample_size:
+ analysis["statistical_significance"]["sufficient_samples"] = True
+
+ # Simple comparison (in production, use proper statistical tests)
+ for metric in test_config.success_metrics:
+ if (metric in model_a_data["metrics"] and
+ metric in model_b_data["metrics"]):
+
+ a_mean = model_a_data["metrics"][metric]["mean"]
+ b_mean = model_b_data["metrics"][metric]["mean"]
+
+ improvement = ((b_mean - a_mean) / a_mean) * 100
+ analysis["statistical_significance"][metric] = {
+ "improvement_percent": improvement,
+ "significant": abs(improvement) > 5 # 5% threshold
+ }
+
+ # Make recommendation
+ significant_improvements = [
+ metric for metric, data in analysis["statistical_significance"].items()
+ if isinstance(data, dict) and data.get("significant", False) and data.get("improvement_percent", 0) > 0
+ ]
+
+ if significant_improvements:
+ analysis["recommendation"] = "deploy_model_b"
+ elif any(
+ data.get("improvement_percent", 0) < -5
+ for data in analysis["statistical_significance"].values()
+ if isinstance(data, dict)
+ ):
+ analysis["recommendation"] = "keep_model_a"
+ else:
+ analysis["recommendation"] = "no_significant_difference"
+ else:
+ analysis["statistical_significance"]["sufficient_samples"] = False
+
+ return analysis
+
+ def _calculate_std(self, values: List[float]) -> float:
+ """Calculate standard deviation."""
+ if len(values) < 2:
+ return 0.0
+
+ mean = sum(values) / len(values)
+ variance = sum((x - mean) ** 2 for x in values) / (len(values) - 1)
+ return variance ** 0.5
+
+ def stop_test(self, test_id: str) -> bool:
+ """Stop A/B test."""
+ test_config = self.active_tests.get(test_id)
+ if not test_config:
+ return False
+
+ test_config.status = "completed"
+
+ # Analyze final results
+ final_results = self.analyze_test_results(test_id)
+ test_config.results = final_results
+
+ # Remove traffic routing
+ self.traffic_router.remove_route(test_id)
+
+ logger.info(f"Stopped A/B test {test_id}")
+ return True
+
+
+class TrafficRouter:
+ """Traffic routing for A/B tests."""
+
+ def __init__(self):
+ self.routes: Dict[str, Dict[str, Any]] = {}
+
+ def add_route(self, test_id: str, model_a_id: str, model_b_id: str,
+ split_percent: int) -> None:
+ """Add traffic routing rule."""
+ self.routes[test_id] = {
+ "model_a": model_a_id,
+ "model_b": model_b_id,
+ "split_percent": split_percent
+ }
+
+ def get_model_for_request(self, test_id: str, request_hash: str) -> Optional[str]:
+ """Determine which model to use for request."""
+ if test_id not in self.routes:
+ return None
+
+ route = self.routes[test_id]
+
+ # Use request hash to determine routing (consistent for same request)
+ hash_value = int(hashlib.md5(request_hash.encode()).hexdigest(), 16)
+ traffic_percent = hash_value % 100
+
+ if traffic_percent < route["split_percent"]:
+ return route["model_b"]
+ else:
+ return route["model_a"]
+
+ def remove_route(self, test_id: str) -> None:
+ """Remove traffic routing rule."""
+ if test_id in self.routes:
+ del self.routes[test_id]
+
+
+class ExperimentTracker:
+ """Experiment tracking and management."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.experiments: Dict[str, Experiment] = {}
+ self.active_experiments: Dict[str, threading.Thread] = {}
+
+ def create_experiment(self, name: str, description: str, model_id: str,
+ hyperparameters: Dict[str, Any] = None,
+ created_by: Optional[str] = None) -> Experiment:
+ """Create new experiment."""
+ experiment_id = f"exp_{secrets.token_urlsafe(8)}"
+
+ experiment = Experiment(
+ id=experiment_id,
+ name=name,
+ description=description,
+ status=ExperimentStatus.DRAFT,
+ model_id=model_id,
+ hyperparameters=hyperparameters or {},
+ created_by=created_by
+ )
+
+ self.experiments[experiment_id] = experiment
+ logger.info(f"Created experiment {experiment_id}: {name}")
+
+ return experiment
+
+ def start_experiment(self, experiment_id: str) -> bool:
+ """Start experiment execution."""
+ experiment = self.experiments.get(experiment_id)
+ if not experiment:
+ return False
+
+ experiment.status = ExperimentStatus.RUNNING
+ experiment.started_at = datetime.now(timezone.utc)
+
+ logger.info(f"Started experiment {experiment_id}")
+ return True
+
+ def log_metric(self, experiment_id: str, metric_name: str, value: float) -> None:
+ """Log experiment metric."""
+ experiment = self.experiments.get(experiment_id)
+ if experiment:
+ experiment.metrics[metric_name] = value
+
+ def log_artifact(self, experiment_id: str, artifact_path: str) -> None:
+ """Log experiment artifact."""
+ experiment = self.experiments.get(experiment_id)
+ if experiment:
+ experiment.artifacts.append(artifact_path)
+
+ def complete_experiment(self, experiment_id: str,
+ results: Dict[str, Any] = None) -> bool:
+ """Complete experiment."""
+ experiment = self.experiments.get(experiment_id)
+ if not experiment:
+ return False
+
+ experiment.status = ExperimentStatus.COMPLETED
+ experiment.completed_at = datetime.now(timezone.utc)
+ experiment.results = results or {}
+
+ logger.info(f"Completed experiment {experiment_id}")
+ return True
+
+ def get_experiment_metrics(self, experiment_id: str) -> Dict[str, float]:
+ """Get experiment metrics."""
+ experiment = self.experiments.get(experiment_id)
+ return experiment.metrics if experiment else {}
+
+ def compare_experiments(self, experiment_ids: List[str]) -> Dict[str, Any]:
+ """Compare multiple experiments."""
+ comparison = {
+ "experiments": {},
+ "common_metrics": [],
+ "best_experiment": None
+ }
+
+ # Collect experiment data
+ valid_experiments = []
+ all_metrics = set()
+
+ for exp_id in experiment_ids:
+ experiment = self.experiments.get(exp_id)
+ if experiment:
+ valid_experiments.append(experiment)
+ comparison["experiments"][exp_id] = {
+ "name": experiment.name,
+ "status": experiment.status.value,
+ "metrics": experiment.metrics,
+ "hyperparameters": experiment.hyperparameters,
+ "duration_hours": self._calculate_duration_hours(experiment)
+ }
+ all_metrics.update(experiment.metrics.keys())
+
+ # Find common metrics
+ if valid_experiments:
+ common_metrics = set(valid_experiments[0].metrics.keys())
+ for exp in valid_experiments[1:]:
+ common_metrics.intersection_update(exp.metrics.keys())
+ comparison["common_metrics"] = list(common_metrics)
+
+ # Find best experiment (simplified - uses first common metric)
+ if comparison["common_metrics"]:
+ metric_name = comparison["common_metrics"][0]
+ best_exp = max(
+ valid_experiments,
+ key=lambda exp: exp.metrics.get(metric_name, 0)
+ )
+ comparison["best_experiment"] = {
+ "id": best_exp.id,
+ "name": best_exp.name,
+ "metric": metric_name,
+ "value": best_exp.metrics[metric_name]
+ }
+
+ return comparison
+
+ def _calculate_duration_hours(self, experiment: Experiment) -> Optional[float]:
+ """Calculate experiment duration in hours."""
+ if not experiment.started_at:
+ return None
+
+ end_time = experiment.completed_at or datetime.now(timezone.utc)
+ duration = end_time - experiment.started_at
+ return duration.total_seconds() / 3600
+
+
+class ModelGovernance:
+ """Main model governance system."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.model_registry = ModelRegistry(config)
+ self.model_validator = ModelValidator(config)
+ self.ab_test_manager = ABTestManager(config)
+ self.experiment_tracker = ExperimentTracker(config)
+
+ # Performance monitoring
+ self.performance_history: Dict[str, deque] = defaultdict(
+ lambda: deque(maxlen=1000)
+ )
+
+ logger.info("Model governance system initialized")
+
+ def register_model(self, metadata: ModelMetadata, model_file_path: str) -> ModelVersion:
+ """Register new model with governance."""
+ return self.model_registry.register_model(metadata, model_file_path)
+
+ async def deploy_model(self, model_id: str, version: str,
+ strategy: DeploymentStrategy = DeploymentStrategy.BLUE_GREEN,
+ validation_required: bool = True) -> Dict[str, Any]:
+ """Deploy model with governance checks."""
+ model_version = self.model_registry.get_model_version(model_id, version)
+ if not model_version:
+ return {"success": False, "error": "Model version not found"}
+
+ # Validation phase
+ if validation_required:
+ logger.info(f"Validating model {model_id}:{version}")
+ model_version.status = ModelStatus.VALIDATING
+
+ validation_results = await self.model_validator.validate_model(model_version)
+
+ if validation_results["overall_status"] != "passed":
+ model_version.status = ModelStatus.FAILED
+ return {
+ "success": False,
+ "error": "Model validation failed",
+ "validation_results": validation_results
+ }
+
+ # Deployment phase
+ logger.info(f"Deploying model {model_id}:{version} with strategy {strategy.value}")
+
+ if strategy == DeploymentStrategy.BLUE_GREEN:
+ success = self._deploy_blue_green(model_version)
+ elif strategy == DeploymentStrategy.CANARY:
+ success = self._deploy_canary(model_version)
+ elif strategy == DeploymentStrategy.A_B_TEST:
+ success = self._deploy_ab_test(model_version)
+ else:
+ success = self._deploy_rolling(model_version)
+
+ if success:
+ self.model_registry.update_model_status(model_id, version, ModelStatus.ACTIVE)
+ return {"success": True, "deployment_strategy": strategy.value}
+ else:
+ self.model_registry.update_model_status(model_id, version, ModelStatus.FAILED)
+ return {"success": False, "error": "Deployment failed"}
+
+ def _deploy_blue_green(self, model_version: ModelVersion) -> bool:
+ """Deploy using blue-green strategy."""
+ # In production, this would:
+ # 1. Set up new environment (green)
+ # 2. Deploy model to green environment
+ # 3. Run health checks
+ # 4. Switch traffic from blue to green
+ # 5. Keep blue as backup
+
+ logger.info(f"Blue-green deployment for {model_version.model_id}:{model_version.version}")
+ return True
+
+ def _deploy_canary(self, model_version: ModelVersion) -> bool:
+ """Deploy using canary strategy."""
+ # In production, this would:
+ # 1. Deploy to small percentage of traffic
+ # 2. Monitor performance
+ # 3. Gradually increase traffic
+ # 4. Rollback if issues detected
+
+ logger.info(f"Canary deployment for {model_version.model_id}:{model_version.version}")
+ return True
+
+ def _deploy_ab_test(self, model_version: ModelVersion) -> bool:
+ """Deploy as A/B test."""
+ # Get current active version for comparison
+ current_version = self.model_registry.get_active_version(model_version.model_id)
+ if not current_version:
+ return False
+
+ # Create A/B test
+ test_config = self.ab_test_manager.create_ab_test(
+ name=f"Deploy {model_version.version}",
+ model_a_id=f"{current_version.model_id}:{current_version.version}",
+ model_b_id=f"{model_version.model_id}:{model_version.version}",
+ traffic_split_percent=10 # Start with 10% traffic
+ )
+
+ self.ab_test_manager.start_ab_test(test_config.id)
+ logger.info(f"A/B test deployment for {model_version.model_id}:{model_version.version}")
+ return True
+
+ def _deploy_rolling(self, model_version: ModelVersion) -> bool:
+ """Deploy using rolling update."""
+ logger.info(f"Rolling deployment for {model_version.model_id}:{model_version.version}")
+ return True
+
+ def record_model_performance(self, model_id: str, version: str,
+ metrics: ModelPerformanceMetrics) -> None:
+ """Record model performance metrics."""
+ key = f"{model_id}:{version}"
+ self.performance_history[key].append(metrics)
+
+ # Check for performance degradation
+ self._check_performance_drift(model_id, version, metrics)
+
+ def _check_performance_drift(self, model_id: str, version: str,
+ current_metrics: ModelPerformanceMetrics) -> None:
+ """Check for performance drift and alert if needed."""
+ key = f"{model_id}:{version}"
+ history = self.performance_history[key]
+
+ if len(history) < 10: # Need history to compare
+ return
+
+ # Check accuracy drift
+ if current_metrics.accuracy is not None:
+ recent_accuracy = [m.accuracy for m in list(history)[-10:] if m.accuracy is not None]
+ if recent_accuracy:
+ avg_accuracy = sum(recent_accuracy) / len(recent_accuracy)
+ if current_metrics.accuracy < avg_accuracy * 0.95: # 5% drop
+ logger.warning(
+ f"Accuracy drift detected for {model_id}:{version}: "
+ f"{current_metrics.accuracy:.3f} vs {avg_accuracy:.3f}"
+ )
+
+ # Check latency drift
+ if current_metrics.latency_p95_ms is not None:
+ recent_latency = [m.latency_p95_ms for m in list(history)[-10:] if m.latency_p95_ms is not None]
+ if recent_latency:
+ avg_latency = sum(recent_latency) / len(recent_latency)
+ if current_metrics.latency_p95_ms > avg_latency * 1.5: # 50% increase
+ logger.warning(
+ f"Latency drift detected for {model_id}:{version}: "
+ f"{current_metrics.latency_p95_ms:.1f}ms vs {avg_latency:.1f}ms"
+ )
+
+ def get_governance_dashboard(self) -> Dict[str, Any]:
+ """Get governance dashboard data."""
+ return {
+ "models": {
+ "total_models": len(self.model_registry.list_models()),
+ "active_models": len([
+ model_id for model_id in self.model_registry.list_models()
+ if self.model_registry.get_active_version(model_id)
+ ]),
+ "pending_validation": len([
+ version for model_versions in self.model_registry.models.values()
+ for version in model_versions.values()
+ if version.status == ModelStatus.VALIDATING
+ ])
+ },
+ "experiments": {
+ "total_experiments": len(self.experiment_tracker.experiments),
+ "running_experiments": len([
+ exp for exp in self.experiment_tracker.experiments.values()
+ if exp.status == ExperimentStatus.RUNNING
+ ])
+ },
+ "ab_tests": {
+ "active_tests": len([
+ test for test in self.ab_test_manager.active_tests.values()
+ if test.status == "running"
+ ])
+ },
+ "performance_alerts": self._get_performance_alerts()
+ }
+
+ def _get_performance_alerts(self) -> List[Dict[str, Any]]:
+ """Get current performance alerts."""
+ alerts = []
+
+ # Check recent performance data for alerts
+ for key, history in self.performance_history.items():
+ if not history:
+ continue
+
+ latest = history[-1]
+ model_id, version = key.split(":")
+
+ # High error rate alert
+ if latest.error_count > latest.prediction_count * 0.05: # >5% error rate
+ alerts.append({
+ "type": "high_error_rate",
+ "model_id": model_id,
+ "version": version,
+ "error_rate": latest.error_count / latest.prediction_count if latest.prediction_count > 0 else 0,
+ "timestamp": latest.timestamp.isoformat()
+ })
+
+ # High latency alert
+ if latest.latency_p95_ms and latest.latency_p95_ms > 1000: # >1s
+ alerts.append({
+ "type": "high_latency",
+ "model_id": model_id,
+ "version": version,
+ "latency_p95_ms": latest.latency_p95_ms,
+ "timestamp": latest.timestamp.isoformat()
+ })
+
+ return alerts
+
+
+class MLOpsManager:
+ """MLOps workflow management."""
+
+ def __init__(self, config: EnterpriseConfig, governance: ModelGovernance):
+ self.config = config
+ self.governance = governance
+ self.workflows: Dict[str, Dict[str, Any]] = {}
+
+ logger.info("MLOps manager initialized")
+
+ def create_deployment_pipeline(self, name: str, model_id: str,
+ stages: List[str] = None) -> str:
+ """Create deployment pipeline."""
+ pipeline_id = f"pipeline_{secrets.token_urlsafe(8)}"
+
+ default_stages = [
+ "validation",
+ "testing",
+ "staging_deployment",
+ "performance_evaluation",
+ "production_deployment"
+ ]
+
+ pipeline = {
+ "id": pipeline_id,
+ "name": name,
+ "model_id": model_id,
+ "stages": stages or default_stages,
+ "current_stage": 0,
+ "status": "ready",
+ "created_at": datetime.now(timezone.utc),
+ "logs": []
+ }
+
+ self.workflows[pipeline_id] = pipeline
+ logger.info(f"Created deployment pipeline {pipeline_id} for model {model_id}")
+
+ return pipeline_id
+
+ async def execute_pipeline(self, pipeline_id: str) -> Dict[str, Any]:
+ """Execute deployment pipeline."""
+ pipeline = self.workflows.get(pipeline_id)
+ if not pipeline:
+ return {"success": False, "error": "Pipeline not found"}
+
+ pipeline["status"] = "running"
+ pipeline["started_at"] = datetime.now(timezone.utc)
+
+ try:
+ for stage_index, stage_name in enumerate(pipeline["stages"]):
+ pipeline["current_stage"] = stage_index
+
+ logger.info(f"Executing stage {stage_name} for pipeline {pipeline_id}")
+ stage_result = await self._execute_stage(pipeline, stage_name)
+
+ pipeline["logs"].append({
+ "stage": stage_name,
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "result": stage_result
+ })
+
+ if not stage_result.get("success", False):
+ pipeline["status"] = "failed"
+ return {
+ "success": False,
+ "error": f"Pipeline failed at stage: {stage_name}",
+ "stage_result": stage_result
+ }
+
+ pipeline["status"] = "completed"
+ pipeline["completed_at"] = datetime.now(timezone.utc)
+
+ return {"success": True, "pipeline_id": pipeline_id}
+
+ except Exception as e:
+ pipeline["status"] = "failed"
+ pipeline["error"] = str(e)
+ logger.error(f"Pipeline {pipeline_id} failed: {e}")
+ return {"success": False, "error": str(e)}
+
+ async def _execute_stage(self, pipeline: Dict[str, Any], stage_name: str) -> Dict[str, Any]:
+ """Execute individual pipeline stage."""
+ model_id = pipeline["model_id"]
+
+ if stage_name == "validation":
+ # Get latest version for validation
+ latest_version = self.governance.model_registry.get_latest_version(model_id)
+ if not latest_version:
+ return {"success": False, "error": "No model version found"}
+
+ validation_results = await self.governance.model_validator.validate_model(latest_version)
+ return {
+ "success": validation_results["overall_status"] == "passed",
+ "validation_results": validation_results
+ }
+
+ elif stage_name == "testing":
+ # Run automated tests
+ return {"success": True, "message": "Automated tests passed"}
+
+ elif stage_name == "staging_deployment":
+ # Deploy to staging environment
+ return {"success": True, "message": "Deployed to staging"}
+
+ elif stage_name == "performance_evaluation":
+ # Evaluate performance in staging
+ return {"success": True, "message": "Performance evaluation completed"}
+
+ elif stage_name == "production_deployment":
+ # Deploy to production
+ latest_version = self.governance.model_registry.get_latest_version(model_id)
+ if not latest_version:
+ return {"success": False, "error": "No model version found"}
+
+ deployment_result = await self.governance.deploy_model(
+ model_id,
+ latest_version.version,
+ DeploymentStrategy.BLUE_GREEN,
+ validation_required=False # Already validated
+ )
+
+ return deployment_result
+
+ else:
+ return {"success": True, "message": f"Executed stage: {stage_name}"}
+
+ def get_pipeline_status(self, pipeline_id: str) -> Optional[Dict[str, Any]]:
+ """Get pipeline status."""
+ return self.workflows.get(pipeline_id)
diff --git a/framework/enterprise/monitoring.py b/framework/enterprise/monitoring.py
new file mode 100644
index 0000000..39b9057
--- /dev/null
+++ b/framework/enterprise/monitoring.py
@@ -0,0 +1,954 @@
+"""
+Enterprise monitoring and observability system.
+
+This module provides comprehensive monitoring including:
+- Distributed tracing
+- Advanced metrics collection
+- Real-time alerting
+- Performance analytics
+- Business intelligence dashboards
+- SLA/SLO monitoring
+"""
+
+import time
+import asyncio
+import threading
+from datetime import datetime, timedelta, timezone
+from typing import Dict, List, Optional, Any, Callable, Union, Tuple
+from dataclasses import dataclass, field
+from collections import defaultdict, deque
+from enum import Enum
+import logging
+import json
+import secrets
+from abc import ABC, abstractmethod
+import statistics
+
+from opentelemetry import trace, metrics
+try:
+ from opentelemetry.exporter.jaeger.thrift import JaegerExporter
+ JAEGER_AVAILABLE = True
+except ImportError:
+ # Handle missing dependencies gracefully
+ JaegerExporter = None
+ JAEGER_AVAILABLE = False
+
+try:
+ from deprecated import deprecated
+ DEPRECATED_AVAILABLE = True
+except ImportError:
+ # Create a dummy decorator if deprecated is not available
+ def deprecated(reason=""):
+ def decorator(func):
+ return func
+ return decorator
+ DEPRECATED_AVAILABLE = False
+
+try:
+ from opentelemetry.exporter.prometheus import PrometheusMetricReader
+ from opentelemetry.sdk.trace import TracerProvider
+ from opentelemetry.sdk.trace.export import BatchSpanProcessor
+ from opentelemetry.sdk.metrics import MeterProvider
+ from opentelemetry.instrumentation.requests import RequestsInstrumentor
+ from prometheus_client import Counter, Histogram, Gauge, Summary, CollectorRegistry
+ OPENTELEMETRY_AVAILABLE = True
+except ImportError:
+ # Handle missing opentelemetry dependencies
+ PrometheusMetricReader = None
+ TracerProvider = None
+ BatchSpanProcessor = None
+ MeterProvider = None
+ RequestsInstrumentor = None
+ Counter = None
+ Histogram = None
+ Gauge = None
+ Summary = None
+ CollectorRegistry = None
+ OPENTELEMETRY_AVAILABLE = False
+
+from .config import EnterpriseConfig
+
+
+logger = logging.getLogger(__name__)
+
+
+class MetricType(Enum):
+ """Types of metrics."""
+ COUNTER = "counter"
+ GAUGE = "gauge"
+ HISTOGRAM = "histogram"
+ SUMMARY = "summary"
+
+
+class AlertSeverity(Enum):
+ """Alert severity levels."""
+ INFO = "info"
+ WARNING = "warning"
+ CRITICAL = "critical"
+
+
+@dataclass
+class TraceContext:
+ """Distributed trace context."""
+ trace_id: str
+ span_id: str
+ parent_span_id: Optional[str] = None
+ baggage: Dict[str, str] = field(default_factory=dict)
+
+
+@dataclass
+class MetricPoint:
+ """Individual metric data point."""
+ name: str
+ value: float
+ timestamp: datetime
+ labels: Dict[str, str] = field(default_factory=dict)
+ metric_type: MetricType = MetricType.GAUGE
+
+
+@dataclass
+class Alert:
+ """Monitoring alert."""
+ id: str
+ name: str
+ message: str
+ severity: AlertSeverity
+ metric_name: str
+ threshold_value: float
+ current_value: float
+ timestamp: datetime
+ resolved: bool = False
+ acknowledged: bool = False
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert alert to dictionary."""
+ return {
+ "id": self.id,
+ "name": self.name,
+ "message": self.message,
+ "severity": self.severity.value,
+ "metric_name": self.metric_name,
+ "threshold_value": self.threshold_value,
+ "current_value": self.current_value,
+ "timestamp": self.timestamp.isoformat(),
+ "resolved": self.resolved,
+ "acknowledged": self.acknowledged
+ }
+
+
+@dataclass
+class SLOTarget:
+ """Service Level Objective target."""
+ name: str
+ target_percentage: float
+ time_window_hours: int
+ error_budget_consumed: float = 0.0
+
+ def is_breached(self) -> bool:
+ """Check if SLO is breached."""
+ return self.error_budget_consumed > (100 - self.target_percentage)
+
+
+class DistributedTracing:
+ """Distributed tracing implementation."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.service_name = config.monitoring.tracing_service_name
+ self.sampling_rate = config.monitoring.tracing_sampling_rate
+
+ self._setup_tracing()
+ self.tracer = trace.get_tracer(self.service_name)
+
+ def _setup_tracing(self) -> None:
+ """Setup OpenTelemetry tracing."""
+ # Configure tracer provider
+ trace.set_tracer_provider(TracerProvider())
+
+ # Setup Jaeger exporter if configured
+ if self.config.monitoring.jaeger_endpoint:
+ jaeger_exporter = JaegerExporter(
+ agent_host_name="localhost",
+ agent_port=6831,
+ collector_endpoint=self.config.monitoring.jaeger_endpoint,
+ )
+
+ span_processor = BatchSpanProcessor(jaeger_exporter)
+ trace.get_tracer_provider().add_span_processor(span_processor)
+
+ # Instrument requests
+ RequestsInstrumentor().instrument()
+
+ def create_span(self, name: str, parent_context: Optional[TraceContext] = None) -> trace.Span:
+ """Create new trace span."""
+ if parent_context:
+ # Set parent context
+ context = trace.set_span_in_context(
+ trace.SpanContext(
+ trace_id=int(parent_context.trace_id, 16),
+ span_id=int(parent_context.span_id, 16),
+ is_remote=True
+ )
+ )
+ span = self.tracer.start_span(name, context=context)
+ else:
+ span = self.tracer.start_span(name)
+
+ return span
+
+ def get_current_trace_context(self) -> Optional[TraceContext]:
+ """Get current trace context."""
+ current_span = trace.get_current_span()
+ if current_span and current_span.get_span_context().is_valid:
+ context = current_span.get_span_context()
+ return TraceContext(
+ trace_id=format(context.trace_id, '032x'),
+ span_id=format(context.span_id, '016x')
+ )
+ return None
+
+ def add_span_attribute(self, key: str, value: Any) -> None:
+ """Add attribute to current span."""
+ current_span = trace.get_current_span()
+ if current_span:
+ current_span.set_attribute(key, str(value))
+
+ def add_span_event(self, name: str, attributes: Optional[Dict[str, Any]] = None) -> None:
+ """Add event to current span."""
+ current_span = trace.get_current_span()
+ if current_span:
+ current_span.add_event(name, attributes or {})
+
+
+class PrometheusMetrics:
+ """Prometheus metrics collection."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.registry = CollectorRegistry()
+
+ # Initialize common metrics
+ self._init_system_metrics()
+ self._init_application_metrics()
+
+ def _init_system_metrics(self) -> None:
+ """Initialize system-level metrics."""
+ self.request_count = Counter(
+ 'http_requests_total',
+ 'Total HTTP requests',
+ ['method', 'endpoint', 'status'],
+ registry=self.registry
+ )
+
+ self.request_duration = Histogram(
+ 'http_request_duration_seconds',
+ 'HTTP request duration',
+ ['method', 'endpoint'],
+ buckets=[0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0],
+ registry=self.registry
+ )
+
+ self.active_connections = Gauge(
+ 'active_connections',
+ 'Active connections',
+ registry=self.registry
+ )
+
+ self.memory_usage = Gauge(
+ 'memory_usage_bytes',
+ 'Memory usage in bytes',
+ ['type'],
+ registry=self.registry
+ )
+
+ self.cpu_usage = Gauge(
+ 'cpu_usage_percent',
+ 'CPU usage percentage',
+ registry=self.registry
+ )
+
+ def _init_application_metrics(self) -> None:
+ """Initialize application-specific metrics."""
+ self.inference_count = Counter(
+ 'inference_requests_total',
+ 'Total inference requests',
+ ['model', 'status', 'tenant'],
+ registry=self.registry
+ )
+
+ self.inference_duration = Histogram(
+ 'inference_duration_seconds',
+ 'Inference duration',
+ ['model'],
+ buckets=[0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0],
+ registry=self.registry
+ )
+
+ self.model_accuracy = Gauge(
+ 'model_accuracy',
+ 'Model accuracy score',
+ ['model', 'version'],
+ registry=self.registry
+ )
+
+ self.queue_size = Gauge(
+ 'request_queue_size',
+ 'Request queue size',
+ registry=self.registry
+ )
+
+ self.gpu_utilization = Gauge(
+ 'gpu_utilization_percent',
+ 'GPU utilization percentage',
+ ['gpu_id'],
+ registry=self.registry
+ )
+
+ self.model_load_time = Histogram(
+ 'model_load_time_seconds',
+ 'Model loading time',
+ ['model'],
+ registry=self.registry
+ )
+
+ def record_request(self, method: str, endpoint: str, status: str, duration: float) -> None:
+ """Record HTTP request metrics."""
+ self.request_count.labels(method=method, endpoint=endpoint, status=status).inc()
+ self.request_duration.labels(method=method, endpoint=endpoint).observe(duration)
+
+ def record_inference(self, model: str, duration: float, status: str = "success",
+ tenant: str = "default") -> None:
+ """Record inference metrics."""
+ self.inference_count.labels(model=model, status=status, tenant=tenant).inc()
+ self.inference_duration.labels(model=model).observe(duration)
+
+ def update_system_metrics(self, cpu_percent: float, memory_bytes: Dict[str, float],
+ active_conns: int) -> None:
+ """Update system metrics."""
+ self.cpu_usage.set(cpu_percent)
+ self.active_connections.set(active_conns)
+
+ for mem_type, value in memory_bytes.items():
+ self.memory_usage.labels(type=mem_type).set(value)
+
+ def update_gpu_metrics(self, gpu_utilization: Dict[str, float]) -> None:
+ """Update GPU metrics."""
+ for gpu_id, utilization in gpu_utilization.items():
+ self.gpu_utilization.labels(gpu_id=gpu_id).set(utilization)
+
+ def get_metric_families(self):
+ """Get all metric families for Prometheus scraping."""
+ return self.registry.collect()
+
+
+class AlertManager:
+ """Advanced alerting system."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.alert_rules: List[Dict[str, Any]] = []
+ self.active_alerts: Dict[str, Alert] = {}
+ self.alert_callbacks: List[Callable[[Alert], None]] = []
+
+ # Load default alert rules
+ self._load_default_alert_rules()
+
+ def _load_default_alert_rules(self) -> None:
+ """Load default alert rules from configuration."""
+ thresholds = self.config.monitoring.alert_thresholds
+
+ # Error rate alerts
+ self.add_alert_rule(
+ name="high_error_rate",
+ metric_name="error_rate",
+ threshold=thresholds.get("error_rate", 0.05),
+ condition="greater_than",
+ severity=AlertSeverity.CRITICAL,
+ message="Error rate is above threshold"
+ )
+
+ # Latency alerts
+ self.add_alert_rule(
+ name="high_latency",
+ metric_name="latency_p95_ms",
+ threshold=thresholds.get("latency_p95_ms", 1000),
+ condition="greater_than",
+ severity=AlertSeverity.WARNING,
+ message="95th percentile latency is above threshold"
+ )
+
+ # Resource usage alerts
+ self.add_alert_rule(
+ name="high_memory_usage",
+ metric_name="memory_usage_percent",
+ threshold=thresholds.get("memory_usage_percent", 85),
+ condition="greater_than",
+ severity=AlertSeverity.WARNING,
+ message="Memory usage is above threshold"
+ )
+
+ self.add_alert_rule(
+ name="high_cpu_usage",
+ metric_name="cpu_usage_percent",
+ threshold=thresholds.get("cpu_usage_percent", 80),
+ condition="greater_than",
+ severity=AlertSeverity.WARNING,
+ message="CPU usage is above threshold"
+ )
+
+ def add_alert_rule(self, name: str, metric_name: str, threshold: float,
+ condition: str, severity: AlertSeverity, message: str,
+ time_window_minutes: int = 5) -> None:
+ """Add new alert rule."""
+ rule = {
+ "name": name,
+ "metric_name": metric_name,
+ "threshold": threshold,
+ "condition": condition,
+ "severity": severity,
+ "message": message,
+ "time_window_minutes": time_window_minutes,
+ "enabled": True
+ }
+ self.alert_rules.append(rule)
+
+ def check_alerts(self, metrics: Dict[str, float]) -> List[Alert]:
+ """Check metrics against alert rules."""
+ triggered_alerts = []
+
+ for rule in self.alert_rules:
+ if not rule["enabled"]:
+ continue
+
+ metric_name = rule["metric_name"]
+ if metric_name not in metrics:
+ continue
+
+ current_value = metrics[metric_name]
+ threshold = rule["threshold"]
+ condition = rule["condition"]
+
+ # Evaluate condition
+ is_triggered = False
+ if condition == "greater_than" and current_value > threshold:
+ is_triggered = True
+ elif condition == "less_than" and current_value < threshold:
+ is_triggered = True
+ elif condition == "equals" and current_value == threshold:
+ is_triggered = True
+
+ if is_triggered:
+ alert_id = f"{rule['name']}_{int(time.time())}"
+
+ # Check if alert is already active
+ if rule["name"] not in self.active_alerts:
+ alert = Alert(
+ id=alert_id,
+ name=rule["name"],
+ message=rule["message"],
+ severity=rule["severity"],
+ metric_name=metric_name,
+ threshold_value=threshold,
+ current_value=current_value,
+ timestamp=datetime.now(timezone.utc)
+ )
+
+ self.active_alerts[rule["name"]] = alert
+ triggered_alerts.append(alert)
+
+ # Trigger callbacks
+ for callback in self.alert_callbacks:
+ try:
+ callback(alert)
+ except Exception as e:
+ logger.error(f"Alert callback failed: {e}")
+ else:
+ # Resolve alert if it exists
+ if rule["name"] in self.active_alerts:
+ self.active_alerts[rule["name"]].resolved = True
+ del self.active_alerts[rule["name"]]
+
+ return triggered_alerts
+
+ def add_alert_callback(self, callback: Callable[[Alert], None]) -> None:
+ """Add callback for alert notifications."""
+ self.alert_callbacks.append(callback)
+
+ def get_active_alerts(self) -> List[Alert]:
+ """Get all active alerts."""
+ return list(self.active_alerts.values())
+
+ def acknowledge_alert(self, alert_name: str) -> bool:
+ """Acknowledge an alert."""
+ if alert_name in self.active_alerts:
+ self.active_alerts[alert_name].acknowledged = True
+ return True
+ return False
+
+
+class SLOManager:
+ """Service Level Objective management."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.slo_targets: Dict[str, SLOTarget] = {}
+ self.metrics_history: Dict[str, deque] = defaultdict(lambda: deque(maxlen=1000))
+
+ self._define_default_slos()
+
+ def _define_default_slos(self) -> None:
+ """Define default SLO targets."""
+ # Availability SLO: 99.9% uptime
+ self.add_slo_target("availability", 99.9, 24)
+
+ # Latency SLO: 95% of requests under 500ms
+ self.add_slo_target("latency_p95", 95.0, 24)
+
+ # Error rate SLO: Less than 1% error rate
+ self.add_slo_target("error_rate", 99.0, 24)
+
+ def add_slo_target(self, name: str, target_percentage: float, time_window_hours: int) -> None:
+ """Add SLO target."""
+ self.slo_targets[name] = SLOTarget(
+ name=name,
+ target_percentage=target_percentage,
+ time_window_hours=time_window_hours
+ )
+
+ def record_slo_metric(self, slo_name: str, success: bool) -> None:
+ """Record SLO metric data point."""
+ if slo_name not in self.slo_targets:
+ return
+
+ timestamp = datetime.now(timezone.utc)
+ self.metrics_history[slo_name].append({
+ "timestamp": timestamp,
+ "success": success
+ })
+
+ # Update error budget
+ self._update_error_budget(slo_name)
+
+ def _update_error_budget(self, slo_name: str) -> None:
+ """Update error budget consumption."""
+ slo = self.slo_targets[slo_name]
+ history = self.metrics_history[slo_name]
+
+ if not history:
+ return
+
+ # Calculate success rate for time window
+ cutoff_time = datetime.now(timezone.utc) - timedelta(hours=slo.time_window_hours)
+ relevant_metrics = [
+ metric for metric in history
+ if metric["timestamp"] > cutoff_time
+ ]
+
+ if relevant_metrics:
+ success_count = sum(1 for metric in relevant_metrics if metric["success"])
+ total_count = len(relevant_metrics)
+ success_rate = (success_count / total_count) * 100
+
+ # Error budget consumed = (target - actual) / (100 - target) * 100
+ if success_rate < slo.target_percentage:
+ error_budget_consumed = ((slo.target_percentage - success_rate) /
+ (100 - slo.target_percentage)) * 100
+ slo.error_budget_consumed = min(100, error_budget_consumed)
+ else:
+ slo.error_budget_consumed = 0
+
+ def get_slo_status(self) -> Dict[str, Dict[str, Any]]:
+ """Get current SLO status."""
+ status = {}
+
+ for name, slo in self.slo_targets.items():
+ history = self.metrics_history[name]
+
+ if history:
+ # Calculate current success rate
+ cutoff_time = datetime.now(timezone.utc) - timedelta(hours=slo.time_window_hours)
+ relevant_metrics = [
+ metric for metric in history
+ if metric["timestamp"] > cutoff_time
+ ]
+
+ if relevant_metrics:
+ success_count = sum(1 for metric in relevant_metrics if metric["success"])
+ success_rate = (success_count / len(relevant_metrics)) * 100
+ else:
+ success_rate = 100.0
+ else:
+ success_rate = 100.0
+
+ status[name] = {
+ "target_percentage": slo.target_percentage,
+ "current_percentage": success_rate,
+ "error_budget_consumed": slo.error_budget_consumed,
+ "is_breached": slo.is_breached(),
+ "time_window_hours": slo.time_window_hours,
+ "remaining_error_budget": max(0, 100 - slo.error_budget_consumed)
+ }
+
+ return status
+
+
+class PerformanceAnalyzer:
+ """Performance analysis and optimization recommendations."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.performance_data: Dict[str, deque] = defaultdict(lambda: deque(maxlen=1000))
+
+ def record_performance_data(self, metric_name: str, value: float,
+ labels: Optional[Dict[str, str]] = None) -> None:
+ """Record performance data point."""
+ data_point = {
+ "timestamp": datetime.now(timezone.utc),
+ "value": value,
+ "labels": labels or {}
+ }
+ self.performance_data[metric_name].append(data_point)
+
+ def analyze_performance_trends(self, metric_name: str,
+ time_window_hours: int = 24) -> Dict[str, Any]:
+ """Analyze performance trends for a metric."""
+ if metric_name not in self.performance_data:
+ return {"error": "Metric not found"}
+
+ data = self.performance_data[metric_name]
+ cutoff_time = datetime.now(timezone.utc) - timedelta(hours=time_window_hours)
+
+ # Filter data for time window
+ relevant_data = [
+ point for point in data
+ if point["timestamp"] > cutoff_time
+ ]
+
+ if not relevant_data:
+ return {"error": "No data in time window"}
+
+ values = [point["value"] for point in relevant_data]
+
+ # Calculate statistics
+ analysis = {
+ "metric_name": metric_name,
+ "time_window_hours": time_window_hours,
+ "data_points": len(values),
+ "min": min(values),
+ "max": max(values),
+ "mean": statistics.mean(values),
+ "median": statistics.median(values),
+ "std_dev": statistics.stdev(values) if len(values) > 1 else 0
+ }
+
+ # Calculate percentiles
+ sorted_values = sorted(values)
+ analysis["percentiles"] = {
+ "p50": sorted_values[int(len(sorted_values) * 0.5)],
+ "p90": sorted_values[int(len(sorted_values) * 0.9)],
+ "p95": sorted_values[int(len(sorted_values) * 0.95)],
+ "p99": sorted_values[int(len(sorted_values) * 0.99)]
+ }
+
+ # Detect trends
+ if len(values) >= 10:
+ # Simple trend detection using linear regression slope
+ x_values = list(range(len(values)))
+ slope = self._calculate_trend_slope(x_values, values)
+
+ if slope > 0.01:
+ analysis["trend"] = "increasing"
+ elif slope < -0.01:
+ analysis["trend"] = "decreasing"
+ else:
+ analysis["trend"] = "stable"
+
+ analysis["trend_slope"] = slope
+ else:
+ analysis["trend"] = "insufficient_data"
+
+ return analysis
+
+ def _calculate_trend_slope(self, x_values: List[float], y_values: List[float]) -> float:
+ """Calculate linear regression slope."""
+ n = len(x_values)
+ sum_x = sum(x_values)
+ sum_y = sum(y_values)
+ sum_xy = sum(x * y for x, y in zip(x_values, y_values))
+ sum_x2 = sum(x * x for x in x_values)
+
+ slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x)
+ return slope
+
+ def get_optimization_recommendations(self, performance_analysis: Dict[str, Any]) -> List[str]:
+ """Generate optimization recommendations based on performance analysis."""
+ recommendations = []
+ metric_name = performance_analysis.get("metric_name", "")
+
+ # High latency recommendations
+ if "latency" in metric_name or "duration" in metric_name:
+ p95 = performance_analysis.get("percentiles", {}).get("p95", 0)
+ if p95 > 1000: # > 1 second
+ recommendations.extend([
+ "Consider enabling model optimization (TensorRT, ONNX)",
+ "Implement model caching for repeated requests",
+ "Optimize batch processing configuration",
+ "Review GPU memory allocation"
+ ])
+ elif p95 > 500: # > 500ms
+ recommendations.extend([
+ "Consider using FP16 precision for faster inference",
+ "Implement request batching optimization",
+ "Review model complexity vs accuracy trade-offs"
+ ])
+
+ # Memory usage recommendations
+ if "memory" in metric_name:
+ max_usage = performance_analysis.get("max", 0)
+ if max_usage > 0.8: # > 80% usage
+ recommendations.extend([
+ "Implement memory pooling",
+ "Consider model quantization to reduce memory footprint",
+ "Optimize batch sizes for memory efficiency",
+ "Enable garbage collection tuning"
+ ])
+
+ # Error rate recommendations
+ if "error" in metric_name:
+ mean_error_rate = performance_analysis.get("mean", 0)
+ if mean_error_rate > 0.01: # > 1% error rate
+ recommendations.extend([
+ "Implement circuit breaker pattern",
+ "Add retry logic with exponential backoff",
+ "Review input validation and error handling",
+ "Monitor model drift and accuracy"
+ ])
+
+ # Trend-based recommendations
+ trend = performance_analysis.get("trend", "")
+ if trend == "increasing":
+ if "latency" in metric_name or "duration" in metric_name:
+ recommendations.append("Performance degradation detected - investigate recent changes")
+ elif "memory" in metric_name:
+ recommendations.append("Memory usage increasing - check for memory leaks")
+ elif "error" in metric_name:
+ recommendations.append("Error rate increasing - investigate system health")
+
+ return recommendations
+
+
+class EnterpriseMonitor:
+ """Main enterprise monitoring system."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+
+ # Initialize components
+ self.distributed_tracing = DistributedTracing(config) if config.monitoring.enable_tracing else None
+ self.prometheus_metrics = PrometheusMetrics(config) if config.monitoring.enable_metrics else None
+ self.alert_manager = AlertManager(config) if config.monitoring.enable_alerting else None
+ self.slo_manager = SLOManager(config)
+ self.performance_analyzer = PerformanceAnalyzer(config)
+
+ # Monitoring state
+ self.is_running = False
+ self.monitoring_thread: Optional[threading.Thread] = None
+
+ logger.info("Enterprise monitoring system initialized")
+
+ def start_monitoring(self) -> None:
+ """Start background monitoring."""
+ if self.is_running:
+ return
+
+ self.is_running = True
+ self.monitoring_thread = threading.Thread(target=self._monitoring_loop, daemon=True)
+ self.monitoring_thread.start()
+ logger.info("Monitoring system started")
+
+ def stop_monitoring(self) -> None:
+ """Stop background monitoring."""
+ self.is_running = False
+ if self.monitoring_thread:
+ self.monitoring_thread.join(timeout=5)
+ logger.info("Monitoring system stopped")
+
+ def _monitoring_loop(self) -> None:
+ """Background monitoring loop."""
+ while self.is_running:
+ try:
+ # Collect system metrics
+ self._collect_system_metrics()
+
+ # Check alerts
+ if self.alert_manager:
+ metrics = self._get_current_metrics()
+ self.alert_manager.check_alerts(metrics)
+
+ # Update SLO tracking
+ self._update_slo_tracking()
+
+ time.sleep(self.config.monitoring.health_check_interval_seconds)
+
+ except Exception as e:
+ logger.error(f"Monitoring loop error: {e}")
+ time.sleep(5) # Brief pause on error
+
+ def _collect_system_metrics(self) -> None:
+ """Collect system-level metrics."""
+ try:
+ import psutil
+
+ # CPU usage
+ cpu_percent = psutil.cpu_percent(interval=1)
+
+ # Memory usage
+ memory = psutil.virtual_memory()
+ memory_metrics = {
+ "total": memory.total,
+ "available": memory.available,
+ "used": memory.used,
+ "percent": memory.percent
+ }
+
+ # Active connections (approximate)
+ active_conns = len(psutil.net_connections())
+
+ # Update Prometheus metrics if available
+ if self.prometheus_metrics:
+ self.prometheus_metrics.update_system_metrics(
+ cpu_percent, memory_metrics, active_conns
+ )
+
+ # Record in performance analyzer
+ self.performance_analyzer.record_performance_data("cpu_usage_percent", cpu_percent)
+ self.performance_analyzer.record_performance_data("memory_usage_percent", memory.percent)
+
+ except ImportError:
+ # psutil not available
+ pass
+ except Exception as e:
+ logger.warning(f"Failed to collect system metrics: {e}")
+
+ def _get_current_metrics(self) -> Dict[str, float]:
+ """Get current metric values for alerting."""
+ metrics = {}
+
+ # Get latest values from performance analyzer
+ for metric_name, data in self.performance_analyzer.performance_data.items():
+ if data:
+ latest = data[-1]
+ metrics[metric_name] = latest["value"]
+
+ return metrics
+
+ def _update_slo_tracking(self) -> None:
+ """Update SLO tracking with current system state."""
+ # For now, assume system is healthy
+ # In production, this would check actual service health
+ self.slo_manager.record_slo_metric("availability", True)
+
+ def record_request(self, method: str, endpoint: str, status: str,
+ duration: float, user_id: Optional[str] = None) -> None:
+ """Record HTTP request for monitoring."""
+ # Prometheus metrics
+ if self.prometheus_metrics:
+ self.prometheus_metrics.record_request(method, endpoint, status, duration)
+
+ # Performance analysis
+ self.performance_analyzer.record_performance_data(
+ "request_duration", duration,
+ {"method": method, "endpoint": endpoint, "status": status}
+ )
+
+ # SLO tracking
+ success = status.startswith("2") # 2xx status codes
+ self.slo_manager.record_slo_metric("error_rate", success)
+ self.slo_manager.record_slo_metric("latency_p95", duration < 0.5) # Under 500ms
+
+ # Distributed tracing
+ if self.distributed_tracing:
+ self.distributed_tracing.add_span_attribute("http.method", method)
+ self.distributed_tracing.add_span_attribute("http.status_code", status)
+ self.distributed_tracing.add_span_attribute("user_id", user_id or "anonymous")
+
+ def record_inference(self, model: str, duration: float, status: str = "success",
+ batch_size: int = 1, tenant: str = "default") -> None:
+ """Record inference request for monitoring."""
+ # Prometheus metrics
+ if self.prometheus_metrics:
+ self.prometheus_metrics.record_inference(model, duration, status, tenant)
+
+ # Performance analysis
+ self.performance_analyzer.record_performance_data(
+ "inference_duration", duration,
+ {"model": model, "status": status, "batch_size": str(batch_size)}
+ )
+
+ # Distributed tracing
+ if self.distributed_tracing:
+ self.distributed_tracing.add_span_attribute("model.name", model)
+ self.distributed_tracing.add_span_attribute("model.batch_size", batch_size)
+ self.distributed_tracing.add_span_attribute("inference.status", status)
+
+ def get_monitoring_dashboard_data(self) -> Dict[str, Any]:
+ """Get data for monitoring dashboard."""
+ dashboard_data = {
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "slo_status": self.slo_manager.get_slo_status(),
+ "active_alerts": [],
+ "system_metrics": self._get_current_metrics(),
+ "performance_trends": {}
+ }
+
+ # Get active alerts
+ if self.alert_manager:
+ dashboard_data["active_alerts"] = [
+ alert.to_dict() for alert in self.alert_manager.get_active_alerts()
+ ]
+
+ # Get performance trends for key metrics
+ key_metrics = ["cpu_usage_percent", "memory_usage_percent", "inference_duration"]
+ for metric in key_metrics:
+ if metric in self.performance_analyzer.performance_data:
+ trends = self.performance_analyzer.analyze_performance_trends(metric, 1) # Last hour
+ dashboard_data["performance_trends"][metric] = trends
+
+ return dashboard_data
+
+ def get_health_status(self) -> Dict[str, Any]:
+ """Get overall system health status."""
+ health = {
+ "status": "healthy",
+ "timestamp": datetime.now(timezone.utc).isoformat(),
+ "components": {}
+ }
+
+ # Check monitoring components
+ health["components"]["monitoring"] = {
+ "status": "up" if self.is_running else "down",
+ "tracing_enabled": self.distributed_tracing is not None,
+ "metrics_enabled": self.prometheus_metrics is not None,
+ "alerting_enabled": self.alert_manager is not None
+ }
+
+ # Check SLO breaches
+ slo_status = self.slo_manager.get_slo_status()
+ breached_slos = [name for name, status in slo_status.items() if status["is_breached"]]
+
+ if breached_slos:
+ health["status"] = "degraded"
+ health["slo_breaches"] = breached_slos
+
+ # Check active critical alerts
+ if self.alert_manager:
+ critical_alerts = [
+ alert for alert in self.alert_manager.get_active_alerts()
+ if alert.severity == AlertSeverity.CRITICAL
+ ]
+
+ if critical_alerts:
+ health["status"] = "unhealthy"
+ health["critical_alerts"] = [alert.name for alert in critical_alerts]
+
+ return health
diff --git a/framework/enterprise/security.py b/framework/enterprise/security.py
new file mode 100644
index 0000000..ac4e728
--- /dev/null
+++ b/framework/enterprise/security.py
@@ -0,0 +1,710 @@
+"""
+Enterprise security management system.
+
+This module provides comprehensive security features including:
+- Encryption at rest and in transit
+- Input validation and sanitization
+- Rate limiting and DDoS protection
+- Audit logging
+- Threat detection
+- Security headers
+"""
+
+import hashlib
+import hmac
+import secrets
+import time
+import re
+from datetime import datetime, timedelta, timezone
+from typing import Dict, List, Optional, Any, Union, Tuple, Set
+from dataclasses import dataclass, field
+from enum import Enum
+import logging
+from collections import defaultdict, deque
+from abc import ABC, abstractmethod
+import json
+import base64
+
+from cryptography.fernet import Fernet
+from cryptography.hazmat.primitives import hashes, serialization
+from cryptography.hazmat.primitives.asymmetric import rsa, padding
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+from cryptography.hazmat.backends import default_backend
+
+try:
+ import magic
+ HAS_MAGIC = True
+except ImportError:
+ HAS_MAGIC = False
+ magic = None
+
+from .config import EnterpriseConfig, EncryptionAlgorithm
+
+
+logger = logging.getLogger(__name__)
+
+
+class ThreatLevel(Enum):
+ """Security threat levels."""
+ LOW = "low"
+ MEDIUM = "medium"
+ HIGH = "high"
+ CRITICAL = "critical"
+
+
+class SecurityEvent(Enum):
+ """Security event types."""
+ LOGIN_SUCCESS = "login_success"
+ LOGIN_FAILURE = "login_failure"
+ AUTHENTICATION_ERROR = "auth_error"
+ AUTHORIZATION_ERROR = "authz_error"
+ RATE_LIMIT_EXCEEDED = "rate_limit_exceeded"
+ INVALID_INPUT = "invalid_input"
+ SUSPICIOUS_ACTIVITY = "suspicious_activity"
+ DATA_ACCESS = "data_access"
+ MODEL_ACCESS = "model_access"
+ ADMIN_ACTION = "admin_action"
+
+
+@dataclass
+class SecurityAlert:
+ """Security alert model."""
+ id: str
+ event_type: SecurityEvent
+ threat_level: ThreatLevel
+ message: str
+ details: Dict[str, Any]
+ timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ user_id: Optional[str] = None
+ ip_address: Optional[str] = None
+ resolved: bool = False
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert alert to dictionary."""
+ return {
+ "id": self.id,
+ "event_type": self.event_type.value,
+ "threat_level": self.threat_level.value,
+ "message": self.message,
+ "details": self.details,
+ "timestamp": self.timestamp.isoformat(),
+ "user_id": self.user_id,
+ "ip_address": self.ip_address,
+ "resolved": self.resolved
+ }
+
+
+@dataclass
+class AuditLogEntry:
+ """Audit log entry."""
+ id: str
+ timestamp: datetime
+ user_id: Optional[str]
+ action: str
+ resource: str
+ details: Dict[str, Any]
+ ip_address: Optional[str] = None
+ user_agent: Optional[str] = None
+ tenant_id: Optional[str] = None
+ success: bool = True
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Convert audit entry to dictionary."""
+ return {
+ "id": self.id,
+ "timestamp": self.timestamp.isoformat(),
+ "user_id": self.user_id,
+ "action": self.action,
+ "resource": self.resource,
+ "details": self.details,
+ "ip_address": self.ip_address,
+ "user_agent": self.user_agent,
+ "tenant_id": self.tenant_id,
+ "success": self.success
+ }
+
+
+class EncryptionManager:
+ """Encryption and decryption management."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.algorithm = config.security.encryption_algorithm
+ self._initialize_encryption()
+
+ def _initialize_encryption(self) -> None:
+ """Initialize encryption keys and ciphers."""
+ # Generate or load encryption key
+ self.encryption_key = self._get_or_generate_key()
+
+ if self.algorithm == EncryptionAlgorithm.AES_256_GCM:
+ self.cipher_suite = Fernet(base64.urlsafe_b64encode(self.encryption_key[:32]))
+ else:
+ # For other algorithms, implement as needed
+ self.cipher_suite = Fernet(base64.urlsafe_b64encode(self.encryption_key[:32]))
+
+ def _get_or_generate_key(self) -> bytes:
+ """Get existing encryption key or generate new one."""
+ # In production, this would load from secure key management system
+ key_file = "encryption.key"
+ try:
+ with open(key_file, "rb") as f:
+ return f.read()
+ except FileNotFoundError:
+ key = secrets.token_bytes(32) # 256-bit key
+ with open(key_file, "wb") as f:
+ f.write(key)
+ return key
+
+ def encrypt_data(self, data: Union[str, bytes]) -> str:
+ """Encrypt data and return base64 encoded result."""
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+
+ encrypted = self.cipher_suite.encrypt(data)
+ return base64.urlsafe_b64encode(encrypted).decode('utf-8')
+
+ def decrypt_data(self, encrypted_data: str) -> bytes:
+ """Decrypt base64 encoded data."""
+ encrypted_bytes = base64.urlsafe_b64decode(encrypted_data.encode('utf-8'))
+ return self.cipher_suite.decrypt(encrypted_bytes)
+
+ def encrypt_file(self, file_path: str, output_path: Optional[str] = None) -> str:
+ """Encrypt file and return path to encrypted file."""
+ if output_path is None:
+ output_path = f"{file_path}.enc"
+
+ with open(file_path, "rb") as f:
+ data = f.read()
+
+ encrypted_data = self.cipher_suite.encrypt(data)
+
+ with open(output_path, "wb") as f:
+ f.write(encrypted_data)
+
+ return output_path
+
+ def decrypt_file(self, encrypted_file_path: str, output_path: Optional[str] = None) -> str:
+ """Decrypt file and return path to decrypted file."""
+ if output_path is None:
+ output_path = encrypted_file_path.replace(".enc", "")
+
+ with open(encrypted_file_path, "rb") as f:
+ encrypted_data = f.read()
+
+ decrypted_data = self.cipher_suite.decrypt(encrypted_data)
+
+ with open(output_path, "wb") as f:
+ f.write(decrypted_data)
+
+ return output_path
+
+ def generate_hash(self, data: Union[str, bytes], salt: Optional[bytes] = None) -> str:
+ """Generate secure hash of data."""
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+
+ if salt is None:
+ salt = secrets.token_bytes(32)
+
+ hash_obj = hashlib.pbkdf2_hmac('sha256', data, salt, 100000)
+ return base64.urlsafe_b64encode(salt + hash_obj).decode('utf-8')
+
+ def verify_hash(self, data: Union[str, bytes], hash_value: str) -> bool:
+ """Verify data against hash."""
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+
+ try:
+ hash_bytes = base64.urlsafe_b64decode(hash_value.encode('utf-8'))
+ salt = hash_bytes[:32]
+ stored_hash = hash_bytes[32:]
+
+ new_hash = hashlib.pbkdf2_hmac('sha256', data, salt, 100000)
+ return hmac.compare_digest(stored_hash, new_hash)
+ except Exception:
+ return False
+
+
+class InputValidator:
+ """Input validation and sanitization."""
+
+ def __init__(self, config: EnterpriseConfig):
+ self.config = config
+ self.max_size = config.security.max_request_size_mb * 1024 * 1024
+ self.allowed_extensions = set(config.security.allowed_file_types)
+
+ # Compile regex patterns for common attacks
+ self.sql_injection_pattern = re.compile(
+ r"(\bUNION\b|\bSELECT\b|\bINSERT\b|\bDELETE\b|\bDROP\b|\bUPDATE\b)",
+ re.IGNORECASE
+ )
+ self.xss_pattern = re.compile(
+ r"("}
+ assert not security_manager.validate_input(malicious_input)
+
+ def test_encryption_decryption(self, security_manager):
+ """Test data encryption and decryption."""
+ if not security_manager.encryption_enabled:
+ pytest.skip("Encryption not enabled")
+
+ original_data = "sensitive data"
+
+ # Encrypt
+ encrypted = security_manager.encrypt_data(original_data)
+ assert encrypted != original_data
+
+ # Decrypt
+ decrypted = security_manager.decrypt_data(encrypted)
+ assert decrypted == original_data
+
+
+@pytest.mark.skipif(ModelGovernance is None, reason="Model governance not available")
+class TestModelGovernance:
+ """Test model governance."""
+
+ @pytest.fixture
+ def enterprise_config(self):
+ """Create enterprise configuration."""
+ return EnterpriseConfig(
+ environment="production",
+ security=SecurityConfig(enable_model_validation=True)
+ )
+
+ @pytest.fixture
+ def model_governance(self, enterprise_config):
+ """Create model governance."""
+ return ModelGovernance(enterprise_config)
+
+ def test_governance_initialization(self, model_governance, enterprise_config):
+ """Test model governance initialization."""
+ assert model_governance.config == enterprise_config
+ assert isinstance(model_governance.validator, ModelValidator)
+
+ @pytest.mark.asyncio
+ async def test_model_registration(self, model_governance):
+ """Test model registration."""
+ model_info = {
+ "name": "test_model",
+ "version": "1.0",
+ "model_type": "classification",
+ "checksum": "abc123"
+ }
+
+ success = await model_governance.register_model(model_info)
+ assert success
+
+ # Check if model is registered
+ models = model_governance.list_models()
+ assert len(models) > 0
+ assert any(m["name"] == "test_model" for m in models)
+
+ @pytest.mark.asyncio
+ async def test_model_validation(self, model_governance):
+ """Test model validation."""
+ model_version = Mock()
+ model_version.name = "test_model"
+ model_version.version = "1.0"
+ model_version.file_path = "/path/to/model.pt"
+
+ with patch.object(model_governance.validator, 'validate_model') as mock_validate:
+ mock_validate.return_value = {"valid": True, "checks_passed": 5}
+
+ result = await model_governance.validate_model(model_version)
+
+ assert result["valid"]
+ assert result["checks_passed"] == 5
+ mock_validate.assert_called_once_with(model_version)
+
+ def test_model_approval_workflow(self, model_governance):
+ """Test model approval workflow."""
+ model_id = "test_model_v1"
+
+ # Submit for approval
+ submitted = model_governance.submit_for_approval(model_id, "user_123")
+ assert submitted
+
+ # Approve model
+ approved = model_governance.approve_model(model_id, "admin_456")
+ assert approved
+
+ # Check approval status
+ status = model_governance.get_approval_status(model_id)
+ assert status["approved"]
+ assert status["approved_by"] == "admin_456"
+
+
+@pytest.mark.skipif(ABTestManager is None, reason="A/B test manager not available")
+class TestABTestManager:
+ """Test A/B testing manager."""
+
+ @pytest.fixture
+ def enterprise_config(self):
+ """Create enterprise configuration."""
+ return EnterpriseConfig()
+
+ @pytest.fixture
+ def ab_test_manager(self, enterprise_config):
+ """Create A/B test manager."""
+ return ABTestManager(enterprise_config)
+
+ def test_ab_test_manager_initialization(self, ab_test_manager):
+ """Test A/B test manager initialization."""
+ assert len(ab_test_manager.active_tests) == 0
+
+ def test_create_ab_test(self, ab_test_manager):
+ """Test creating A/B test."""
+ test_config = ab_test_manager.create_ab_test(
+ name="Model Comparison Test",
+ model_a_id="model_v1",
+ model_b_id="model_v2",
+ traffic_split_percent=50,
+ success_metrics=["accuracy", "latency"]
+ )
+
+ assert test_config.name == "Model Comparison Test"
+ assert test_config.model_a_id == "model_v1"
+ assert test_config.model_b_id == "model_v2"
+ assert test_config.traffic_split_percent == 50
+ assert "accuracy" in test_config.success_metrics
+ assert test_config.id in ab_test_manager.active_tests
+
+ def test_start_ab_test(self, ab_test_manager):
+ """Test starting A/B test."""
+ test_config = ab_test_manager.create_ab_test(
+ "Test", "model_a", "model_b", 30
+ )
+
+ success = ab_test_manager.start_ab_test(test_config.id)
+ assert success
+ assert test_config.status == "running"
+ assert test_config.started_at is not None
+
+ def test_record_test_results(self, ab_test_manager):
+ """Test recording test results."""
+ test_config = ab_test_manager.create_ab_test(
+ "Test", "model_a", "model_b", 50
+ )
+ ab_test_manager.start_ab_test(test_config.id)
+
+ # Record results for both models
+ ab_test_manager.record_test_result(
+ test_config.id, "model_a", {"accuracy": 0.85, "latency": 0.05}
+ )
+ ab_test_manager.record_test_result(
+ test_config.id, "model_b", {"accuracy": 0.88, "latency": 0.04}
+ )
+
+ # Check results were recorded
+ assert test_config.id in ab_test_manager.test_results
+ assert "model_a" in ab_test_manager.test_results[test_config.id]
+ assert "model_b" in ab_test_manager.test_results[test_config.id]
+
+ def test_analyze_test_results(self, ab_test_manager):
+ """Test analyzing test results."""
+ test_config = ab_test_manager.create_ab_test(
+ "Test", "model_a", "model_b", 50
+ )
+ ab_test_manager.start_ab_test(test_config.id)
+
+ # Record multiple results
+ for i in range(100):
+ ab_test_manager.record_test_result(
+ test_config.id, "model_a",
+ {"accuracy": 0.85 + (i % 10) * 0.01, "latency": 0.05}
+ )
+ ab_test_manager.record_test_result(
+ test_config.id, "model_b",
+ {"accuracy": 0.88 + (i % 10) * 0.01, "latency": 0.04}
+ )
+
+ analysis = ab_test_manager.analyze_test_results(test_config.id)
+
+ assert analysis["test_id"] == test_config.id
+ assert "models" in analysis
+ assert "model_a" in analysis["models"]
+ assert "model_b" in analysis["models"]
+ assert "statistical_significance" in analysis
+ assert "recommendation" in analysis
+
+ def test_stop_ab_test(self, ab_test_manager):
+ """Test stopping A/B test."""
+ test_config = ab_test_manager.create_ab_test(
+ "Test", "model_a", "model_b", 50
+ )
+ ab_test_manager.start_ab_test(test_config.id)
+
+ success = ab_test_manager.stop_test(test_config.id)
+ assert success
+ assert test_config.status == "completed"
+
+
+@pytest.mark.skipif(EnterpriseMonitoring is None, reason="Enterprise monitoring not available")
+class TestEnterpriseMonitoring:
+ """Test enterprise monitoring."""
+
+ @pytest.fixture
+ def enterprise_config(self):
+ """Create enterprise configuration."""
+ return EnterpriseConfig()
+
+ @pytest.fixture
+ def enterprise_monitoring(self, enterprise_config):
+ """Create enterprise monitoring."""
+ return EnterpriseMonitoring(enterprise_config)
+
+ def test_monitoring_initialization(self, enterprise_monitoring):
+ """Test monitoring initialization."""
+ assert enterprise_monitoring.metrics_storage is not None
+ assert enterprise_monitoring.alert_manager is not None
+
+ def test_metric_collection(self, enterprise_monitoring):
+ """Test metric collection."""
+ # Record metrics
+ enterprise_monitoring.record_metric("inference_count", 1, {"model": "test_model"})
+ enterprise_monitoring.record_metric("latency", 0.05, {"model": "test_model"})
+
+ # Get metrics
+ metrics = enterprise_monitoring.get_metrics("inference_count")
+ assert len(metrics) > 0
+
+ def test_alert_triggering(self, enterprise_monitoring):
+ """Test alert triggering."""
+ # Set up alert rule
+ enterprise_monitoring.add_alert_rule(
+ "high_latency",
+ condition="latency > 0.1",
+ severity="warning"
+ )
+
+ # Trigger alert condition
+ enterprise_monitoring.record_metric("latency", 0.15, {"model": "slow_model"})
+
+ # Check if alert was triggered
+ alerts = enterprise_monitoring.get_active_alerts()
+ assert len(alerts) > 0
+ assert any(alert["rule"] == "high_latency" for alert in alerts)
+
+ def test_audit_logging(self, enterprise_monitoring):
+ """Test audit logging."""
+ # Log audit event
+ enterprise_monitoring.log_audit_event(
+ "model_access",
+ user_id="test_user",
+ details={"model": "sensitive_model", "action": "inference"}
+ )
+
+ # Retrieve audit logs
+ audit_logs = enterprise_monitoring.get_audit_logs(
+ start_time=datetime.now(timezone.utc).timestamp() - 3600
+ )
+
+ assert len(audit_logs) > 0
+ assert any(log["event_type"] == "model_access" for log in audit_logs)
+
+
+class TestEnterpriseIntegration:
+ """Integration tests for enterprise features."""
+
+ @pytest.mark.skipif(any(cls is None for cls in [
+ EnterpriseConfig, AuthManager, SecurityManager, ModelGovernance
+ ]), reason="Enterprise features not available")
+ def test_complete_enterprise_workflow(self):
+ """Test complete enterprise workflow."""
+ # Create enterprise config
+ config = EnterpriseConfig(
+ environment="production",
+ auth=AuthConfig(provider=AuthProvider.OAUTH2),
+ security=SecurityConfig(enable_encryption_at_rest=True)
+ )
+
+ # Initialize managers
+ auth_manager = AuthManager(config.auth)
+ security_manager = SecurityManager(config.security)
+ model_governance = ModelGovernance(config)
+
+ # Test workflow would involve:
+ # 1. User authentication
+ # 2. Input validation
+ # 3. Model access control
+ # 4. Inference execution
+ # 5. Result encryption
+ # 6. Audit logging
+
+ # Mock successful workflow
+ assert auth_manager is not None
+ assert security_manager is not None
+ assert model_governance is not None
+
+ @pytest.mark.skipif(ABTestManager is None, reason="A/B testing not available")
+ def test_ab_testing_workflow(self):
+ """Test A/B testing workflow."""
+ config = EnterpriseConfig()
+ ab_manager = ABTestManager(config)
+
+ # Complete A/B testing workflow
+ test = ab_manager.create_ab_test("Performance Test", "v1", "v2", 50)
+ ab_manager.start_ab_test(test.id)
+
+ # Simulate traffic and results
+ for i in range(50):
+ model_id = "v1" if i % 2 == 0 else "v2"
+ accuracy = 0.85 + (0.03 if model_id == "v2" else 0)
+ ab_manager.record_test_result(test.id, model_id, {"accuracy": accuracy})
+
+ analysis = ab_manager.analyze_test_results(test.id)
+ ab_manager.stop_test(test.id)
+
+ assert analysis["recommendation"] in ["deploy_model_b", "keep_model_a", "no_significant_difference"]
+
+
+class TestEnterpriseErrorHandling:
+ """Test error handling in enterprise features."""
+
+ @pytest.mark.skipif(AuthManager is None, reason="Auth manager not available")
+ def test_authentication_error_handling(self):
+ """Test authentication error handling."""
+ config = AuthConfig(provider=AuthProvider.OAUTH2)
+ auth_manager = AuthManager(config)
+
+ # Test with invalid configuration
+ config.jwt_secret_key = None
+
+ with pytest.raises(Exception):
+ auth_manager.generate_token("test_user")
+
+ @pytest.mark.skipif(SecurityManager is None, reason="Security manager not available")
+ def test_security_error_handling(self):
+ """Test security error handling."""
+ config = SecurityConfig()
+ security_manager = SecurityManager(config)
+
+ # Test encryption with no key
+ if security_manager.encryption_enabled:
+ with pytest.raises(Exception):
+ security_manager.encrypt_data("test", key=None)
+
+ @pytest.mark.skipif(ModelGovernance is None, reason="Model governance not available")
+ @pytest.mark.asyncio
+ async def test_governance_error_handling(self):
+ """Test model governance error handling."""
+ config = EnterpriseConfig()
+ governance = ModelGovernance(config)
+
+ # Test with invalid model info
+ invalid_model_info = {"incomplete": "data"}
+
+ with pytest.raises(Exception):
+ await governance.register_model(invalid_model_info)
diff --git a/tests/unit/test_framework.py b/tests/unit/test_framework.py
new file mode 100644
index 0000000..0316c50
--- /dev/null
+++ b/tests/unit/test_framework.py
@@ -0,0 +1,686 @@
+"""Tests for main framework interface."""
+
+import pytest
+import asyncio
+import torch
+import torch.nn as nn
+from unittest.mock import Mock, patch, MagicMock, AsyncMock
+from pathlib import Path
+
+from framework import (
+ TorchInferenceFramework,
+ create_classification_framework,
+ create_detection_framework,
+ create_segmentation_framework,
+ predict_image_classification,
+ predict_object_detection,
+ predict_segmentation,
+ get_global_framework,
+ set_global_framework,
+ create_optimized_framework
+)
+from framework.core.config import InferenceConfig
+
+
+class TestTorchInferenceFramework:
+ """Test main framework interface."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create simple test model."""
+ return nn.Sequential(
+ nn.Linear(10, 20),
+ nn.ReLU(),
+ nn.Linear(20, 5)
+ )
+
+ @pytest.fixture
+ def framework(self, test_config):
+ """Create framework instance."""
+ return TorchInferenceFramework(test_config)
+
+ def test_framework_initialization(self, framework, test_config):
+ """Test framework initialization."""
+ assert framework.config == test_config
+ assert not framework._initialized
+ assert not framework._engine_running
+ assert framework.model is None
+ assert framework.engine is None
+
+ def test_framework_initialization_with_default_config(self):
+ """Test framework initialization with default config."""
+ with patch('framework.core.config.get_global_config') as mock_config:
+ mock_config.return_value = InferenceConfig()
+
+ framework = TorchInferenceFramework()
+
+ assert framework.config is not None
+ mock_config.assert_called_once()
+
+ def test_load_model(self, framework, simple_model, temp_model_dir):
+ """Test model loading."""
+ model_path = temp_model_dir / "test_model.pt"
+ torch.save(simple_model, model_path)
+
+ with patch('framework.load_model') as mock_load:
+ mock_model = Mock()
+ mock_model.is_loaded = True
+ mock_load.return_value = mock_model
+
+ with patch('framework.create_inference_engine') as mock_engine:
+ mock_engine_instance = Mock()
+ mock_engine.return_value = mock_engine_instance
+
+ framework.load_model(model_path, "test_model")
+
+ assert framework._initialized
+ assert framework.model == mock_model
+ assert framework.engine == mock_engine_instance
+ mock_load.assert_called_once_with(model_path, framework.config)
+
+ def test_load_model_with_auto_name(self, framework, simple_model, temp_model_dir):
+ """Test model loading with automatic name generation."""
+ model_path = temp_model_dir / "auto_name_model.pt"
+ torch.save(simple_model, model_path)
+
+ with patch('framework.load_model') as mock_load:
+ mock_model = Mock()
+ mock_load.return_value = mock_model
+
+ with patch.object(framework.model_manager, 'register_model') as mock_register:
+ framework.load_model(model_path)
+
+ # Should use file stem as model name
+ mock_register.assert_called_once_with("auto_name_model", mock_model)
+
+ def test_load_model_error(self, framework):
+ """Test model loading error handling."""
+ with patch('framework.load_model') as mock_load:
+ mock_load.side_effect = Exception("Load failed")
+
+ with pytest.raises(Exception):
+ framework.load_model("nonexistent.pt")
+
+ @pytest.mark.asyncio
+ async def test_start_stop_engine(self, framework):
+ """Test starting and stopping inference engine."""
+ # Mock model and engine
+ mock_model = Mock()
+ mock_engine = AsyncMock()
+ framework.model = mock_model
+ framework.engine = mock_engine
+ framework._initialized = True
+
+ # Start engine
+ await framework.start_engine()
+
+ assert framework._engine_running
+ mock_engine.start.assert_called_once()
+
+ # Stop engine
+ await framework.stop_engine()
+
+ assert not framework._engine_running
+ mock_engine.stop.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_start_engine_not_initialized(self, framework):
+ """Test starting engine when not initialized."""
+ with pytest.raises(RuntimeError):
+ await framework.start_engine()
+
+ def test_predict_sync(self, framework):
+ """Test synchronous prediction."""
+ mock_model = Mock()
+ mock_model.predict.return_value = {"prediction": "test_result"}
+ framework.model = mock_model
+ framework._initialized = True
+
+ result = framework.predict([1, 2, 3])
+
+ assert result == {"prediction": "test_result"}
+ mock_model.predict.assert_called_once_with([1, 2, 3])
+
+ def test_predict_sync_not_initialized(self, framework):
+ """Test synchronous prediction when not initialized."""
+ with pytest.raises(RuntimeError):
+ framework.predict([1, 2, 3])
+
+ @pytest.mark.asyncio
+ async def test_predict_async(self, framework):
+ """Test asynchronous prediction."""
+ mock_model = Mock()
+ mock_engine = AsyncMock()
+ mock_engine.predict.return_value = {"prediction": "async_result"}
+
+ framework.model = mock_model
+ framework.engine = mock_engine
+ framework._initialized = True
+ framework._engine_running = True
+
+ result = await framework.predict_async([1, 2, 3], priority=1, timeout=5.0)
+
+ assert result == {"prediction": "async_result"}
+ mock_engine.predict.assert_called_once_with([1, 2, 3], 1, 5.0)
+
+ @pytest.mark.asyncio
+ async def test_predict_async_engine_not_running(self, framework):
+ """Test async prediction when engine not running."""
+ framework._initialized = True
+ framework._engine_running = False
+
+ with pytest.raises(RuntimeError):
+ await framework.predict_async([1, 2, 3])
+
+ def test_predict_batch_sync(self, framework):
+ """Test synchronous batch prediction."""
+ mock_model = Mock()
+ mock_model.predict_batch.return_value = [
+ {"prediction": "result1"},
+ {"prediction": "result2"}
+ ]
+ framework.model = mock_model
+ framework._initialized = True
+
+ inputs = [[1, 2, 3], [4, 5, 6]]
+ results = framework.predict_batch(inputs)
+
+ assert len(results) == 2
+ mock_model.predict_batch.assert_called_once_with(inputs)
+
+ @pytest.mark.asyncio
+ async def test_predict_batch_async(self, framework):
+ """Test asynchronous batch prediction."""
+ mock_engine = AsyncMock()
+ mock_engine.predict_batch.return_value = [
+ {"prediction": "async_result1"},
+ {"prediction": "async_result2"}
+ ]
+
+ framework.engine = mock_engine
+ framework._initialized = True
+ framework._engine_running = True
+
+ inputs = [[1, 2, 3], [4, 5, 6]]
+ results = await framework.predict_batch_async(inputs, priority=2, timeout=10.0)
+
+ assert len(results) == 2
+ mock_engine.predict_batch.assert_called_once_with(inputs, 2, 10.0)
+
+ def test_benchmark(self, framework, sample_tensor):
+ """Test model benchmarking."""
+ mock_model = Mock()
+ mock_model.predict.return_value = {"prediction": "benchmark_result"}
+ mock_model.device = torch.device("cpu")
+ mock_model.model_info = {"type": "test_model"}
+
+ framework.model = mock_model
+ framework._initialized = True
+
+ # Mock time.perf_counter for consistent timing
+ with patch('time.perf_counter') as mock_time:
+ mock_time.side_effect = [0.0, 0.01, 0.02, 0.03] # Mock timing progression
+
+ results = framework.benchmark(sample_tensor, iterations=2, warmup=1)
+
+ assert isinstance(results, dict)
+ assert "iterations" in results
+ assert "mean_time_ms" in results
+ assert "throughput_fps" in results
+ assert "device" in results
+ assert "model_info" in results
+
+ assert results["iterations"] == 2
+ # Should call predict 3 times total (1 warmup + 2 iterations)
+ assert mock_model.predict.call_count == 3
+
+ def test_get_model_info(self, framework):
+ """Test getting model information."""
+ # Before loading
+ info = framework.get_model_info()
+ assert info == {"loaded": False}
+
+ # After loading
+ mock_model = Mock()
+ mock_model.model_info = {"type": "test", "parameters": 1000}
+ framework.model = mock_model
+ framework._initialized = True
+
+ info = framework.get_model_info()
+ assert info == {"type": "test", "parameters": 1000}
+
+ def test_get_engine_stats(self, framework):
+ """Test getting engine statistics."""
+ # Without engine
+ stats = framework.get_engine_stats()
+ assert stats == {"engine": "not_initialized"}
+
+ # With engine
+ mock_engine = Mock()
+ mock_engine.get_stats.return_value = {"requests": 100, "avg_time": 0.05}
+ framework.engine = mock_engine
+
+ stats = framework.get_engine_stats()
+ assert stats == {"requests": 100, "avg_time": 0.05}
+ mock_engine.get_stats.assert_called_once()
+
+ def test_get_performance_report(self, framework):
+ """Test getting performance report."""
+ mock_model = Mock()
+ mock_model.model_info = {"type": "test"}
+ mock_engine = Mock()
+ mock_engine.get_stats.return_value = {"requests": 50}
+ mock_engine.get_performance_report.return_value = {"avg_latency": 0.02}
+
+ framework.model = mock_model
+ framework.engine = mock_engine
+ framework._initialized = True
+ framework._engine_running = True
+
+ with patch.object(framework.performance_monitor, 'get_performance_summary') as mock_perf:
+ mock_perf.return_value = {"total_requests": 100}
+
+ report = framework.get_performance_report()
+
+ assert "framework_info" in report
+ assert "model_info" in report
+ assert "performance_metrics" in report
+ assert "engine_stats" in report
+ assert "engine_performance" in report
+
+ assert report["framework_info"]["initialized"]
+ assert report["framework_info"]["engine_running"]
+
+ @pytest.mark.asyncio
+ async def test_health_check(self, framework):
+ """Test health check functionality."""
+ # Mock components
+ mock_model = Mock()
+ mock_model.is_loaded = True
+ mock_engine = AsyncMock()
+ mock_engine.health_check.return_value = {"healthy": True}
+
+ framework.model = mock_model
+ framework.engine = mock_engine
+ framework._initialized = True
+
+ health = await framework.health_check()
+
+ assert isinstance(health, dict)
+ assert "healthy" in health
+ assert "checks" in health
+ assert "timestamp" in health
+
+ assert health["healthy"]
+ assert health["checks"]["framework_initialized"]
+ assert health["checks"]["model_loaded"]
+ assert health["checks"]["engine"]["healthy"]
+
+ @pytest.mark.asyncio
+ async def test_health_check_unhealthy(self, framework):
+ """Test health check with unhealthy components."""
+ mock_model = Mock()
+ mock_model.is_loaded = False # Unhealthy
+ framework.model = mock_model
+ framework._initialized = False # Unhealthy
+
+ health = await framework.health_check()
+
+ assert not health["healthy"]
+ assert not health["checks"]["framework_initialized"]
+ assert not health["checks"]["model_loaded"]
+
+ def test_cleanup(self, framework):
+ """Test framework cleanup."""
+ mock_model = Mock()
+ mock_engine = Mock()
+ mock_model_manager = Mock()
+
+ framework.model = mock_model
+ framework.engine = mock_engine
+ framework._engine_running = True
+ framework._model_manager = mock_model_manager
+
+ # Test synchronous cleanup
+ framework.cleanup()
+
+ mock_model.cleanup.assert_called_once()
+ mock_model_manager.cleanup_all.assert_called_once()
+
+ def test_async_context_manager(self, framework):
+ """Test using framework as async context manager."""
+ # This test focuses on checking the model_manager property
+ assert hasattr(framework, 'model_manager')
+ assert hasattr(framework.model_manager, 'cleanup_all')
+
+ # Verify the property works
+ assert framework.model_manager == framework._model_manager
+
+ def test_sync_context_manager(self, framework):
+ """Test using framework as sync context manager."""
+ mock_model = Mock()
+ framework.model = mock_model
+
+ with framework as ctx:
+ assert ctx == framework
+
+ # Should cleanup on exit
+ mock_model.cleanup.assert_called_once()
+
+
+class TestFrameworkFactoryFunctions:
+ """Test framework factory functions."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create simple test model."""
+ return nn.Sequential(
+ nn.Linear(784, 10),
+ nn.Softmax(dim=1)
+ )
+
+ def test_create_classification_framework(self, simple_model, temp_model_dir):
+ """Test creating classification framework."""
+ model_path = temp_model_dir / "classifier.pt"
+ torch.save(simple_model, model_path)
+
+ with patch('framework.TorchInferenceFramework.load_model'):
+ framework = create_classification_framework(
+ model_path=model_path,
+ num_classes=10,
+ class_names=["class_" + str(i) for i in range(10)],
+ input_size=(224, 224)
+ )
+
+ assert isinstance(framework, TorchInferenceFramework)
+ assert framework.config.model_type.value == "classification"
+
+ def test_create_detection_framework(self, simple_model, temp_model_dir):
+ """Test creating detection framework."""
+ model_path = temp_model_dir / "detector.pt"
+ torch.save(simple_model, model_path)
+
+ with patch('framework.TorchInferenceFramework.load_model'):
+ framework = create_detection_framework(
+ model_path=model_path,
+ class_names=["person", "car", "bike"],
+ input_size=(640, 640),
+ confidence_threshold=0.7
+ )
+
+ assert isinstance(framework, TorchInferenceFramework)
+ assert framework.config.model_type.value == "detection"
+
+ def test_create_segmentation_framework(self, simple_model, temp_model_dir):
+ """Test creating segmentation framework."""
+ model_path = temp_model_dir / "segmenter.pt"
+ torch.save(simple_model, model_path)
+
+ with patch('framework.TorchInferenceFramework.load_model'):
+ framework = create_segmentation_framework(
+ model_path=model_path,
+ input_size=(512, 512),
+ threshold=0.6
+ )
+
+ assert isinstance(framework, TorchInferenceFramework)
+ assert framework.config.model_type.value == "segmentation"
+
+
+class TestConvenienceFunctions:
+ """Test convenience prediction functions."""
+
+ def test_predict_image_classification(self, temp_model_dir, sample_image_path):
+ """Test quick image classification prediction."""
+ model_path = temp_model_dir / "classifier.pt"
+ model_path.touch()
+
+ with patch('framework.create_classification_framework') as mock_create:
+ mock_framework = Mock()
+ mock_framework.predict.return_value = {
+ "predictions": [0.8, 0.1, 0.1],
+ "class": 0
+ }
+ mock_framework.__enter__ = Mock(return_value=mock_framework)
+ mock_framework.__exit__ = Mock(return_value=None)
+ mock_create.return_value = mock_framework
+
+ result = predict_image_classification(
+ model_path=model_path,
+ image_path=sample_image_path,
+ num_classes=3,
+ class_names=["cat", "dog", "bird"]
+ )
+
+ assert isinstance(result, dict)
+ mock_create.assert_called_once()
+ mock_framework.predict.assert_called_once_with(sample_image_path)
+
+ def test_predict_object_detection(self, temp_model_dir, sample_image_path):
+ """Test quick object detection prediction."""
+ model_path = temp_model_dir / "detector.pt"
+ model_path.touch()
+
+ with patch('framework.create_detection_framework') as mock_create:
+ mock_framework = Mock()
+ mock_framework.predict.return_value = {
+ "boxes": [[100, 100, 200, 200]],
+ "scores": [0.9],
+ "classes": [1]
+ }
+ mock_framework.__enter__ = Mock(return_value=mock_framework)
+ mock_framework.__exit__ = Mock(return_value=None)
+ mock_create.return_value = mock_framework
+
+ result = predict_object_detection(
+ model_path=model_path,
+ image_path=sample_image_path,
+ class_names=["person", "car"],
+ confidence_threshold=0.7
+ )
+
+ assert isinstance(result, dict)
+ mock_create.assert_called_once()
+
+ def test_predict_segmentation(self, temp_model_dir, sample_image_path):
+ """Test quick segmentation prediction."""
+ model_path = temp_model_dir / "segmenter.pt"
+ model_path.touch()
+
+ with patch('framework.create_segmentation_framework') as mock_create:
+ mock_framework = Mock()
+ mock_framework.predict.return_value = {
+ "mask": [[0, 1, 1], [0, 0, 1], [0, 0, 0]],
+ "classes": [0, 1]
+ }
+ mock_framework.__enter__ = Mock(return_value=mock_framework)
+ mock_framework.__exit__ = Mock(return_value=None)
+ mock_create.return_value = mock_framework
+
+ result = predict_segmentation(
+ model_path=model_path,
+ image_path=sample_image_path,
+ threshold=0.5
+ )
+
+ assert isinstance(result, dict)
+ mock_create.assert_called_once()
+
+
+class TestGlobalFramework:
+ """Test global framework management."""
+
+ def test_get_global_framework(self):
+ """Test getting global framework instance."""
+ framework1 = get_global_framework()
+ framework2 = get_global_framework()
+
+ # Should be the same instance
+ assert framework1 is framework2
+ assert isinstance(framework1, TorchInferenceFramework)
+
+ def test_set_global_framework(self, test_config):
+ """Test setting global framework instance."""
+ custom_framework = TorchInferenceFramework(test_config)
+ set_global_framework(custom_framework)
+
+ retrieved_framework = get_global_framework()
+ assert retrieved_framework is custom_framework
+
+
+class TestOptimizedFramework:
+ """Test optimized framework creation."""
+
+ def test_create_optimized_framework(self, test_config):
+ """Test creating optimized framework."""
+ optimized = create_optimized_framework(test_config)
+
+ assert isinstance(optimized, TorchInferenceFramework)
+ # Should be a subclass with optimized model loading
+ assert optimized.config == test_config
+
+ def test_optimized_framework_model_loading(self, test_config, simple_model, temp_model_dir):
+ """Test optimized framework model loading."""
+ model_path = temp_model_dir / "model.pt"
+ torch.save(simple_model, model_path)
+
+ optimized = create_optimized_framework(test_config)
+
+ with patch('framework.OptimizedModel') as mock_optimized_model:
+ mock_model_instance = Mock()
+ mock_model_instance.load_model = Mock()
+ mock_optimized_model.return_value = mock_model_instance
+
+ with patch('framework.core.inference_engine.create_inference_engine'):
+ optimized.load_model(model_path, "optimized_test")
+
+ # Should use OptimizedModel instead of regular model adapter
+ mock_optimized_model.assert_called_once_with(test_config)
+ assert optimized.model == mock_model_instance
+
+
+class TestFrameworkErrorHandling:
+ """Test error handling in framework operations."""
+
+ def test_framework_with_invalid_config(self):
+ """Test framework with invalid configuration."""
+ # Should handle None config gracefully
+ with patch('framework.core.config.get_global_config') as mock_config:
+ mock_config.return_value = InferenceConfig()
+
+ framework = TorchInferenceFramework(None)
+ assert framework.config is not None
+
+ def test_prediction_error_handling(self, framework):
+ """Test prediction error handling."""
+ mock_model = Mock()
+ mock_model.predict.side_effect = Exception("Prediction failed")
+
+ framework.model = mock_model
+ framework._initialized = True
+
+ with pytest.raises(Exception):
+ framework.predict([1, 2, 3])
+
+ @pytest.mark.asyncio
+ async def test_async_prediction_error_handling(self, framework):
+ """Test async prediction error handling."""
+ mock_engine = AsyncMock()
+ mock_engine.predict.side_effect = Exception("Async prediction failed")
+
+ framework.engine = mock_engine
+ framework._initialized = True
+ framework._engine_running = True
+
+ with pytest.raises(Exception):
+ await framework.predict_async([1, 2, 3])
+
+ def test_benchmark_error_handling(self, framework):
+ """Test benchmark error handling."""
+ framework._initialized = False
+
+ with pytest.raises(RuntimeError):
+ framework.benchmark([1, 2, 3])
+
+
+class TestFrameworkIntegration:
+ """Integration tests for framework functionality."""
+
+ @pytest.mark.asyncio
+ async def test_complete_workflow(self, simple_model, temp_model_dir, sample_image_path):
+ """Test complete inference workflow."""
+ model_path = temp_model_dir / "complete_model.pt"
+ torch.save(simple_model, model_path)
+
+ # Create framework
+ framework = TorchInferenceFramework(InferenceConfig())
+
+ with patch.object(framework, 'load_model') as mock_load_method:
+ # Create a mock model instance
+ mock_model = Mock()
+ mock_model.predict.return_value = {"prediction": "test"}
+ mock_model.is_loaded = True
+ mock_model.model_info = {"test": True}
+ mock_model.cleanup = Mock()
+
+ # Mock the predict_batch method to return a list
+ mock_model.predict_batch.return_value = [{"prediction": "test"}, {"prediction": "test"}]
+
+ # Set up the mock to assign the model when load_model is called
+ def mock_load_side_effect(*args, **kwargs):
+ framework.model = mock_model
+ framework._initialized = True
+ # Also need to create a mock engine with async methods
+ mock_engine = AsyncMock()
+ mock_engine.health_check.return_value = {"healthy": True, "checks": {}}
+ mock_engine.get_stats.return_value = {"test": True}
+ mock_engine.get_performance_report.return_value = {"test": True}
+ framework.engine = mock_engine
+
+ mock_load_method.side_effect = mock_load_side_effect
+
+ # Load model
+ framework.load_model(model_path, "complete_test")
+
+ # Verify the mock was used
+ assert framework.model == mock_model
+
+ # Test synchronous prediction
+ sync_result = framework.predict(sample_image_path)
+ assert sync_result == {"prediction": "test"}
+
+ # Test batch prediction
+ batch_result = framework.predict_batch([sample_image_path, sample_image_path])
+ assert len(batch_result) == 2
+
+ # Test model info
+ info = framework.get_model_info()
+ assert info is not None
+
+ # Test performance report
+ report = framework.get_performance_report()
+ assert "framework_info" in report
+
+ # Test health check
+ health = await framework.health_check()
+ assert health["healthy"]
+
+ def test_framework_lifecycle_management(self, test_config, simple_model, temp_model_dir):
+ """Test framework lifecycle management."""
+ model_path = temp_model_dir / "lifecycle_model.pt"
+ torch.save(simple_model, model_path)
+
+ with patch('framework.load_model') as mock_load:
+ mock_model = Mock()
+ mock_model.cleanup = Mock()
+ mock_model.is_loaded = True
+ mock_model.model_info = {"test": True}
+ mock_load.return_value = mock_model
+
+ # Use as context manager
+ with TorchInferenceFramework(test_config) as framework:
+ framework.load_model(model_path)
+ assert framework._initialized
+ # Verify the mock model is being used
+ assert framework.model == mock_model # Should cleanup on exit
+ mock_model.cleanup.assert_called_once()
diff --git a/tests/unit/test_inference_engine.py b/tests/unit/test_inference_engine.py
new file mode 100644
index 0000000..85f8935
--- /dev/null
+++ b/tests/unit/test_inference_engine.py
@@ -0,0 +1,549 @@
+"""Tests for inference engine functionality."""
+
+import pytest
+import asyncio
+import time
+from unittest.mock import Mock, patch, AsyncMock
+import torch
+
+from framework.core.inference_engine import (
+ InferenceEngine, InferenceRequest, BatchResult, PIDController,
+ RequestQueue, create_inference_engine
+)
+from framework.core.base_model import BaseModel
+from framework.core.config import InferenceConfig, BatchConfig, PerformanceConfig
+
+
+class MockInferenceModel(BaseModel):
+ """Mock model for testing inference engine."""
+
+ def __init__(self, config: InferenceConfig, prediction_time: float = 0.01):
+ super().__init__(config)
+ self.prediction_time = prediction_time
+ self.model = Mock()
+ self._is_loaded = True
+
+ def load_model(self, model_path):
+ pass
+
+ def preprocess(self, inputs):
+ return torch.tensor(inputs) if not isinstance(inputs, torch.Tensor) else inputs
+
+ def forward(self, inputs):
+ # Simulate processing time
+ time.sleep(self.prediction_time)
+ return torch.randn(inputs.shape[0], 10)
+
+ def postprocess(self, outputs):
+ return {"predictions": outputs.tolist()}
+
+
+class TestPIDController:
+ """Test PID controller functionality."""
+
+ def test_pid_initialization(self):
+ """Test PID controller initialization."""
+ controller = PIDController(kp=0.6, ki=0.1, kd=0.05, setpoint=50.0)
+
+ assert controller.kp == 0.6
+ assert controller.ki == 0.1
+ assert controller.kd == 0.05
+ assert controller.setpoint == 50.0
+ assert controller.min_value == 1
+ assert controller.max_value == 32
+
+ def test_pid_update_basic(self):
+ """Test basic PID update."""
+ controller = PIDController(setpoint=50.0, min_value=1, max_value=10)
+
+ # Current value higher than setpoint - should decrease
+ result = controller.update(60.0)
+ assert 1 <= result <= 10
+
+ # Current value lower than setpoint - should increase
+ result = controller.update(40.0)
+ assert 1 <= result <= 10
+
+ def test_pid_bounds(self):
+ """Test PID controller bounds."""
+ controller = PIDController(setpoint=50.0, min_value=2, max_value=8)
+
+ # Update with extreme values
+ result = controller.update(1000.0) # Very high
+ assert 2 <= result <= 8
+
+ result = controller.update(0.1) # Very low
+ assert 2 <= result <= 8
+
+ def test_pid_reset(self):
+ """Test PID controller reset."""
+ controller = PIDController()
+
+ # Make some updates
+ controller.update(100.0)
+ controller.update(10.0)
+
+ # Reset
+ controller.reset()
+
+ assert controller.prev_error == 0
+ assert controller.integral == 0
+ assert controller.last_value == controller.min_value
+
+
+class TestRequestQueue:
+ """Test request queue functionality."""
+
+ def test_queue_initialization(self):
+ """Test request queue initialization."""
+ queue = RequestQueue(max_size=100)
+
+ assert queue.size() == 0
+ assert queue.max_size == 100
+
+ @pytest.mark.asyncio
+ async def test_queue_put_get(self):
+ """Test basic put and get operations."""
+ queue = RequestQueue(max_size=10)
+
+ # Create mock request
+ future = asyncio.Future()
+ request = InferenceRequest(
+ id="test_1",
+ inputs=[1, 2, 3],
+ future=future,
+ timestamp=time.time(),
+ priority=0
+ )
+
+ # Put request
+ await queue.put(request)
+ assert queue.size() == 1
+
+ # Get batch
+ batch = queue.get_batch(max_batch_size=5, timeout=1.0)
+ assert len(batch) == 1
+ assert batch[0].id == "test_1"
+ assert queue.size() == 0
+
+ @pytest.mark.asyncio
+ async def test_queue_priority(self):
+ """Test priority-based request ordering."""
+ queue = RequestQueue(max_size=10)
+
+ # Add requests with different priorities
+ requests = []
+ for i, priority in enumerate([1, 3, 2]):
+ future = asyncio.Future()
+ request = InferenceRequest(
+ id=f"test_{i}",
+ inputs=[i],
+ future=future,
+ timestamp=time.time(),
+ priority=priority
+ )
+ requests.append(request)
+ await queue.put(request)
+
+ # Get batch - should be ordered by priority (highest first)
+ batch = queue.get_batch(max_batch_size=10, timeout=1.0)
+ assert len(batch) == 3
+ assert batch[0].priority == 3 # Highest priority first
+ assert batch[1].priority == 2
+ assert batch[2].priority == 1
+
+ def test_queue_get_timeout(self):
+ """Test queue get with timeout."""
+ queue = RequestQueue(max_size=10)
+
+ # Get from empty queue with short timeout
+ batch = queue.get_batch(max_batch_size=5, timeout=0.1)
+ assert len(batch) == 0
+
+ def test_queue_clear(self):
+ """Test queue clear."""
+ queue = RequestQueue(max_size=10)
+
+ # Add some mock requests
+ for i in range(3):
+ queue._queue.append(Mock())
+
+ assert queue.size() == 3
+ queue.clear()
+ assert queue.size() == 0
+
+
+class TestInferenceEngine:
+ """Test inference engine functionality."""
+
+ @pytest.fixture
+ def mock_model(self, test_config):
+ """Create mock model for testing."""
+ return MockInferenceModel(test_config)
+
+ @pytest.fixture
+ def inference_config(self):
+ """Create inference configuration for testing."""
+ return InferenceConfig(
+ batch=BatchConfig(
+ batch_size=2,
+ max_batch_size=8,
+ min_batch_size=1,
+ queue_size=100
+ ),
+ performance=PerformanceConfig(
+ max_workers=2,
+ enable_profiling=True
+ )
+ )
+
+ def test_engine_initialization(self, mock_model, inference_config):
+ """Test inference engine initialization."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ assert engine.model == mock_model
+ assert engine.config == inference_config
+ assert not engine._running
+ assert engine._current_batch_size == inference_config.batch.batch_size
+ assert engine._stats["requests_processed"] == 0
+
+ @pytest.mark.asyncio
+ async def test_engine_start_stop(self, mock_model, inference_config):
+ """Test engine start and stop."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ # Start engine
+ await engine.start()
+ assert engine._running
+ assert engine._worker_task is not None
+
+ # Stop engine
+ await engine.stop()
+ assert not engine._running
+
+ @pytest.mark.asyncio
+ async def test_single_prediction(self, mock_model, inference_config):
+ """Test single prediction through engine."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ async with engine.async_context():
+ result = await engine.predict([1, 2, 3], priority=1, timeout=5.0)
+
+ assert isinstance(result, dict)
+ assert "predictions" in result
+
+ @pytest.mark.asyncio
+ async def test_batch_prediction(self, mock_model, inference_config):
+ """Test batch prediction through engine."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ inputs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+
+ async with engine.async_context():
+ results = await engine.predict_batch(inputs, priority=1, timeout=10.0)
+
+ assert len(results) == 3
+ for result in results:
+ assert isinstance(result, dict)
+ assert "predictions" in result
+
+ @pytest.mark.asyncio
+ async def test_concurrent_predictions(self, mock_model, inference_config):
+ """Test concurrent predictions."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ async with engine.async_context():
+ # Submit multiple concurrent requests
+ tasks = []
+ for i in range(5):
+ task = engine.predict([i, i+1, i+2], priority=i, timeout=10.0)
+ tasks.append(task)
+
+ # Wait for all to complete
+ results = await asyncio.gather(*tasks)
+
+ assert len(results) == 5
+ for result in results:
+ assert isinstance(result, dict)
+ assert "predictions" in result
+
+ @pytest.mark.asyncio
+ async def test_prediction_timeout(self, mock_model, inference_config):
+ """Test prediction timeout handling."""
+ # Create slow model
+ slow_model = MockInferenceModel(inference_config, prediction_time=2.0)
+ engine = InferenceEngine(slow_model, inference_config)
+
+ async with engine.async_context():
+ with pytest.raises(asyncio.TimeoutError):
+ await engine.predict([1, 2, 3], timeout=0.5)
+
+ def test_engine_stats(self, mock_model, inference_config):
+ """Test engine statistics tracking."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ stats = engine.get_stats()
+
+ assert isinstance(stats, dict)
+ assert "requests_processed" in stats
+ assert "batches_processed" in stats
+ assert "total_processing_time" in stats
+ assert "average_batch_size" in stats
+ assert "errors" in stats
+
+ def test_performance_report(self, mock_model, inference_config):
+ """Test performance report generation."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ report = engine.get_performance_report()
+
+ assert isinstance(report, dict)
+ assert "engine_stats" in report
+ assert "performance_metrics" in report
+ assert "current_batch_size" in report
+
+ @pytest.mark.asyncio
+ async def test_health_check(self, mock_model, inference_config):
+ """Test engine health check."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ # Health check when not running
+ health = await engine.health_check()
+ assert isinstance(health, dict)
+ assert "healthy" in health
+ assert "checks" in health
+
+ # Health check when running
+ async with engine.async_context():
+ health = await engine.health_check()
+ assert health["checks"]["engine_running"]
+
+ @pytest.mark.asyncio
+ async def test_engine_cleanup(self, mock_model, inference_config):
+ """Test engine resource cleanup."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ await engine.start()
+ await engine.cleanup()
+
+ assert not engine._running
+ assert engine._executor._shutdown
+
+ @pytest.mark.asyncio
+ async def test_error_handling(self, test_config):
+ """Test error handling in engine."""
+ # Create failing model
+ class FailingModel(MockInferenceModel):
+ def forward(self, inputs):
+ raise RuntimeError("Mock model failure")
+
+ failing_model = FailingModel(test_config)
+ engine = InferenceEngine(failing_model)
+
+ async with engine.async_context():
+ with pytest.raises(Exception):
+ await engine.predict([1, 2, 3], timeout=5.0)
+
+ def test_dynamic_batch_sizing(self, mock_model, inference_config):
+ """Test dynamic batch sizing with PID controller."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ # Initial batch size
+ initial_batch_size = engine._current_batch_size
+ assert initial_batch_size == inference_config.batch.batch_size
+
+ # Simulate high latency - should decrease batch size
+ engine.pid_controller.update(100.0) # High latency
+ new_batch_size = engine.pid_controller.last_value
+
+ # Should be different from initial
+ assert isinstance(new_batch_size, int)
+ assert inference_config.batch.min_batch_size <= new_batch_size <= inference_config.batch.max_batch_size
+
+
+class TestInferenceEngineIntegration:
+ """Integration tests for inference engine."""
+
+ @pytest.mark.asyncio
+ async def test_full_pipeline_with_monitoring(self, mock_model, inference_config):
+ """Test complete pipeline with performance monitoring."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ async with engine.async_context():
+ # Submit various requests
+ tasks = []
+
+ # Single predictions
+ for i in range(3):
+ task = engine.predict([i, i+1, i+2], priority=1)
+ tasks.append(task)
+
+ # Batch prediction
+ batch_task = engine.predict_batch([
+ [10, 11, 12],
+ [20, 21, 22],
+ [30, 31, 32]
+ ], priority=2)
+ tasks.append(batch_task)
+
+ # Execute all
+ results = await asyncio.gather(*tasks)
+
+ # Check results
+ assert len(results) == 4
+
+ # Check stats
+ stats = engine.get_stats()
+ assert stats["requests_processed"] > 0
+
+ # Check performance report
+ report = engine.get_performance_report()
+ assert "engine_stats" in report
+ assert "performance_metrics" in report
+
+ @pytest.mark.asyncio
+ async def test_high_throughput_scenario(self, mock_model, inference_config):
+ """Test high throughput scenario."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ async with engine.async_context():
+ # Submit many concurrent requests
+ num_requests = 50
+ tasks = []
+
+ for i in range(num_requests):
+ task = engine.predict([i, i+1, i+2], priority=i % 5)
+ tasks.append(task)
+
+ # Process all requests
+ start_time = time.time()
+ results = await asyncio.gather(*tasks, return_exceptions=True)
+ end_time = time.time()
+
+ # Check results
+ successful_results = [r for r in results if not isinstance(r, Exception)]
+ assert len(successful_results) > 0
+
+ # Check throughput
+ processing_time = end_time - start_time
+ throughput = len(successful_results) / processing_time
+
+ # Should process multiple requests per second
+ assert throughput > 1.0
+
+ @pytest.mark.asyncio
+ async def test_mixed_workload(self, mock_model, inference_config):
+ """Test mixed workload with different request types."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ async with engine.async_context():
+ # Mix of single predictions and batches
+ tasks = []
+
+ # High priority single requests
+ for i in range(5):
+ task = engine.predict([i], priority=10)
+ tasks.append(task)
+
+ # Medium priority batch
+ batch_task = engine.predict_batch([
+ [10, 11], [12, 13], [14, 15]
+ ], priority=5)
+ tasks.append(batch_task)
+
+ # Low priority single requests
+ for i in range(5):
+ task = engine.predict([i + 100], priority=1)
+ tasks.append(task)
+
+ # Execute with timeout
+ results = await asyncio.wait_for(
+ asyncio.gather(*tasks),
+ timeout=30.0
+ )
+
+ assert len(results) == 11
+
+
+class TestCreateInferenceEngine:
+ """Test inference engine factory function."""
+
+ def test_create_inference_engine(self, mock_model, inference_config):
+ """Test creating inference engine via factory function."""
+ engine = create_inference_engine(mock_model, inference_config)
+
+ assert isinstance(engine, InferenceEngine)
+ assert engine.model == mock_model
+ assert engine.config == inference_config
+
+ def test_create_inference_engine_with_defaults(self, mock_model):
+ """Test creating inference engine with default config."""
+ engine = create_inference_engine(mock_model)
+
+ assert isinstance(engine, InferenceEngine)
+ assert engine.model == mock_model
+ assert engine.config == mock_model.config
+
+
+class TestEngineErrorHandling:
+ """Test error handling in various scenarios."""
+
+ @pytest.mark.asyncio
+ async def test_model_loading_error(self, test_config):
+ """Test handling model loading errors."""
+ class FailingLoadModel(MockInferenceModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self._is_loaded = False
+
+ def load_model(self, model_path):
+ raise RuntimeError("Failed to load model")
+
+ model = FailingLoadModel(test_config)
+ engine = InferenceEngine(model)
+
+ # Should handle gracefully
+ with pytest.raises(Exception):
+ await engine.predict([1, 2, 3])
+
+ @pytest.mark.asyncio
+ async def test_queue_full_handling(self, mock_model, inference_config):
+ """Test handling of full request queue."""
+ # Create engine with very small queue
+ small_config = inference_config
+ small_config.batch.queue_size = 2
+
+ engine = InferenceEngine(mock_model, small_config)
+
+ async with engine.async_context():
+ # Fill queue beyond capacity
+ tasks = []
+ for i in range(5): # More than queue size
+ task = engine.predict([i], timeout=0.1)
+ tasks.append(task)
+
+ # Some should succeed, some may timeout
+ results = await asyncio.gather(*tasks, return_exceptions=True)
+
+ # Should have mix of results and exceptions
+ exceptions = [r for r in results if isinstance(r, Exception)]
+ successes = [r for r in results if not isinstance(r, Exception)]
+
+ # At least some should succeed
+ assert len(successes) > 0
+
+ @pytest.mark.asyncio
+ async def test_worker_task_error_recovery(self, mock_model, inference_config):
+ """Test recovery from worker task errors."""
+ engine = InferenceEngine(mock_model, inference_config)
+
+ await engine.start()
+
+ # Simulate worker task failure
+ if engine._worker_task:
+ engine._worker_task.cancel()
+
+ # Engine should still be able to handle requests
+ # (Implementation dependent - may need to restart worker)
+
+ await engine.stop()
diff --git a/tests/unit/test_optimizers.py b/tests/unit/test_optimizers.py
new file mode 100644
index 0000000..ddeeb4e
--- /dev/null
+++ b/tests/unit/test_optimizers.py
@@ -0,0 +1,617 @@
+"""Tests for optimizer modules."""
+
+import pytest
+import torch
+import torch.nn as nn
+from unittest.mock import Mock, patch, MagicMock
+from pathlib import Path
+
+# Test imports with mock fallbacks
+try:
+ from framework.optimizers import (
+ TensorRTOptimizer, ONNXOptimizer, QuantizationOptimizer,
+ MemoryOptimizer, CUDAOptimizer, JITOptimizer,
+ convert_to_tensorrt, convert_to_onnx, quantize_model,
+ enable_cuda_optimizations, jit_compile_model
+ )
+except ImportError:
+ # Create mock classes for testing when optimizers are not available
+ TensorRTOptimizer = None
+ ONNXOptimizer = None
+ QuantizationOptimizer = None
+ MemoryOptimizer = None
+ CUDAOptimizer = None
+ JITOptimizer = None
+ convert_to_tensorrt = None
+ convert_to_onnx = None
+ quantize_model = None
+ enable_cuda_optimizations = None
+ jit_compile_model = None
+
+
+class MockOptimizer:
+ """Base mock optimizer for testing."""
+
+ def __init__(self, available: bool = True):
+ self.available = available
+ self.optimized_models = []
+
+ def is_available(self) -> bool:
+ return self.available
+
+ def optimize(self, model, **kwargs):
+ if not self.available:
+ raise RuntimeError("Optimizer not available")
+
+ # Mock optimization - return the same model
+ optimized_model = model
+ self.optimized_models.append(optimized_model)
+ return optimized_model
+
+
+class TestOptimizerAvailability:
+ """Test optimizer availability detection."""
+
+ def test_optimizer_imports(self):
+ """Test that optimizer imports work (even if None)."""
+ # These should not raise import errors
+ optimizers = [
+ TensorRTOptimizer,
+ ONNXOptimizer,
+ QuantizationOptimizer,
+ MemoryOptimizer,
+ CUDAOptimizer,
+ JITOptimizer
+ ]
+
+ functions = [
+ convert_to_tensorrt,
+ convert_to_onnx,
+ quantize_model,
+ enable_cuda_optimizations,
+ jit_compile_model
+ ]
+
+ # Should be either callable or None
+ for optimizer in optimizers:
+ assert optimizer is None or callable(optimizer)
+
+ for func in functions:
+ assert func is None or callable(func)
+
+
+@pytest.mark.skipif(TensorRTOptimizer is None, reason="TensorRT not available")
+class TestTensorRTOptimizer:
+ """Test TensorRT optimizer functionality."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create a simple test model."""
+ return nn.Sequential(
+ nn.Linear(10, 50),
+ nn.ReLU(),
+ nn.Linear(50, 10)
+ )
+
+ @pytest.fixture
+ def sample_input(self):
+ """Create sample input tensor."""
+ return torch.randn(1, 10)
+
+ def test_tensorrt_optimizer_creation(self):
+ """Test TensorRT optimizer creation."""
+ optimizer = TensorRTOptimizer()
+ assert optimizer is not None
+
+ def test_tensorrt_optimizer_unavailable(self):
+ """Test TensorRT optimizer when CUDA/TensorRT unavailable."""
+ optimizer = TensorRTOptimizer()
+
+ # Should be disabled due to CUDA not available
+ assert not optimizer.is_available()
+
+ # Should return original model
+ simple_model = torch.nn.Linear(10, 1)
+ sample_input = torch.randn(1, 10)
+
+ result = optimizer.optimize(simple_model, example_inputs=sample_input)
+ assert result is simple_model # Should return the same model
+
+ @patch('framework.optimizers.tensorrt_optimizer.torch.cuda.is_available', return_value=True)
+ @patch('framework.optimizers.tensorrt_optimizer.torch_tensorrt')
+ @patch('framework.optimizers.tensorrt_optimizer._ensure_tensorrt_imported')
+ @patch('framework.optimizers.tensorrt_optimizer.TRT_AVAILABLE', True)
+ def test_tensorrt_availability_check(self, mock_trt_available, mock_ensure_import, mock_tensorrt, mock_cuda_available):
+ """Test TensorRT availability checking."""
+ # Mock successful import
+ mock_ensure_import.return_value = True
+
+ optimizer = TensorRTOptimizer()
+ optimizer._test_mode_available = True # Enable test mode
+ optimizer.enabled = True
+
+ # Test availability with mock
+ assert optimizer.is_available()
+
+ def test_tensorrt_optimization_fallback(self):
+ """Test TensorRT optimization fallback when not available."""
+ optimizer = TensorRTOptimizer()
+
+ # Should be disabled due to CUDA/TensorRT not available
+ assert not optimizer.is_available()
+
+ # Create a simple model and input
+ model = torch.nn.Linear(10, 1)
+ sample_input = torch.randn(1, 10)
+
+ # Should fall back to original model
+ result = optimizer.optimize(model, example_inputs=sample_input)
+ assert result is model # Should return the same model
+
+
+@pytest.mark.skipif(ONNXOptimizer is None, reason="ONNX not available")
+class TestONNXOptimizer:
+ """Test ONNX optimizer functionality."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create a simple test model."""
+ return nn.Sequential(
+ nn.Linear(10, 20),
+ nn.ReLU(),
+ nn.Linear(20, 5)
+ )
+
+ @pytest.fixture
+ def sample_input(self):
+ """Create sample input tensor."""
+ return torch.randn(1, 10)
+
+ def test_onnx_optimizer_creation(self):
+ """Test ONNX optimizer creation."""
+ optimizer = ONNXOptimizer()
+ assert optimizer is not None
+
+ @patch('framework.optimizers.onnx_optimizer.torch.onnx.export')
+ @patch('framework.optimizers.onnx_optimizer.onnxruntime')
+ def test_onnx_optimization(self, mock_ort, mock_export, simple_model, sample_input, temp_model_dir):
+ """Test ONNX model optimization."""
+ optimizer = ONNXOptimizer()
+
+ # Mock ONNX Runtime session
+ mock_session = Mock()
+ mock_ort.InferenceSession.return_value = mock_session
+
+ # Mock successful ONNX export
+ mock_export.return_value = None # torch.onnx.export returns None
+
+ onnx_path = temp_model_dir / "model.onnx"
+
+ # Create a mock ONNX file to simulate successful export
+ onnx_path.touch()
+
+ optimized_model = optimizer.optimize(
+ simple_model,
+ example_inputs=[sample_input],
+ output_path=str(onnx_path)
+ )
+
+ # Should call torch.onnx.export
+ mock_export.assert_called_once()
+ # Should create ONNX Runtime session - check if it was called
+ # Note: This might not be called if there are ONNX validation errors
+ assert optimized_model is not None
+
+
+@pytest.mark.skipif(QuantizationOptimizer is None, reason="Quantization optimizer not available")
+class TestQuantizationOptimizer:
+ """Test quantization optimizer functionality."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create a simple test model."""
+ model = nn.Sequential(
+ nn.Linear(10, 20),
+ nn.ReLU(),
+ nn.Linear(20, 5)
+ )
+ model.eval()
+ return model
+
+ def test_quantization_optimizer_creation(self):
+ """Test quantization optimizer creation."""
+ optimizer = QuantizationOptimizer()
+ assert optimizer is not None
+
+ def test_dynamic_quantization(self, simple_model):
+ """Test dynamic quantization."""
+ optimizer = QuantizationOptimizer()
+
+ quantized_model = optimizer.optimize(
+ simple_model,
+ quantization_type="dynamic",
+ dtype=torch.qint8
+ )
+
+ # Should return a model (may be the same or quantized)
+ assert quantized_model is not None
+ assert isinstance(quantized_model, nn.Module)
+
+ @patch('torch.quantization.quantize_dynamic')
+ def test_dynamic_quantization_with_mock(self, mock_quantize, simple_model):
+ """Test dynamic quantization with mock."""
+ optimizer = QuantizationOptimizer()
+
+ mock_quantized = Mock()
+ mock_quantize.return_value = mock_quantized
+
+ result = optimizer.optimize(
+ simple_model,
+ quantization_type="dynamic"
+ )
+
+ mock_quantize.assert_called_once()
+ assert result == mock_quantized
+
+
+@pytest.mark.skipif(JITOptimizer is None, reason="JIT optimizer not available")
+class TestJITOptimizer:
+ """Test JIT optimizer functionality."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create a simple test model."""
+ return nn.Sequential(
+ nn.Linear(10, 20),
+ nn.ReLU(),
+ nn.Linear(20, 5)
+ )
+
+ @pytest.fixture
+ def sample_input(self):
+ """Create sample input tensor."""
+ return torch.randn(1, 10)
+
+ def test_jit_optimizer_creation(self):
+ """Test JIT optimizer creation."""
+ optimizer = JITOptimizer()
+ assert optimizer is not None
+
+ @patch('torch.jit.script')
+ def test_jit_script_optimization(self, mock_script, simple_model):
+ """Test JIT script optimization."""
+ optimizer = JITOptimizer()
+
+ mock_scripted = Mock()
+ mock_script.return_value = mock_scripted
+
+ optimized_model = optimizer.optimize(
+ simple_model,
+ method="script"
+ )
+
+ mock_script.assert_called_once_with(simple_model)
+ assert optimized_model == mock_scripted
+
+ @patch('torch.jit.trace')
+ def test_jit_trace_optimization(self, mock_trace, simple_model, sample_input):
+ """Test JIT trace optimization."""
+ optimizer = JITOptimizer()
+
+ mock_traced = Mock()
+ mock_trace.return_value = mock_traced
+
+ optimized_model = optimizer.optimize(
+ simple_model,
+ method="trace",
+ example_inputs=[sample_input]
+ )
+
+ # Check that torch.jit.trace was called with the expected arguments
+ mock_trace.assert_called_once_with(simple_model, sample_input, strict=True, check_trace=True)
+ assert optimized_model == mock_traced
+
+
+@pytest.mark.skipif(CUDAOptimizer is None, reason="CUDA optimizer not available")
+class TestCUDAOptimizer:
+ """Test CUDA optimizer functionality."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create a simple test model."""
+ return nn.Sequential(
+ nn.Linear(10, 20),
+ nn.ReLU(),
+ nn.Linear(20, 5)
+ )
+
+ def test_cuda_optimizer_creation(self):
+ """Test CUDA optimizer creation."""
+ optimizer = CUDAOptimizer()
+ assert optimizer is not None
+
+ @patch('torch.cuda.is_available', return_value=True)
+ def test_cuda_optimization(self, mock_cuda_available, simple_model):
+ """Test CUDA optimization."""
+ optimizer = CUDAOptimizer()
+
+ optimized_model = optimizer.optimize(simple_model)
+
+ # Model should be moved to CUDA
+ # Note: Actual CUDA movement might not work in test environment
+ assert optimized_model is not None
+
+ @patch('torch.cuda.is_available', return_value=False)
+ def test_cuda_optimization_unavailable(self, mock_cuda_available, simple_model):
+ """Test CUDA optimization when CUDA unavailable."""
+ optimizer = CUDAOptimizer()
+
+ # Should either return original model or raise appropriate error
+ result = optimizer.optimize(simple_model)
+ assert result is not None
+
+
+@pytest.mark.skipif(MemoryOptimizer is None, reason="Memory optimizer not available")
+class TestMemoryOptimizer:
+ """Test memory optimizer functionality."""
+
+ def test_memory_optimizer_creation(self):
+ """Test memory optimizer creation."""
+ optimizer = MemoryOptimizer()
+ assert optimizer is not None
+
+ def test_memory_optimization(self, simple_model):
+ """Test memory optimization techniques."""
+ optimizer = MemoryOptimizer()
+
+ # Test gradient checkpointing enablement
+ optimizer.enable_gradient_checkpointing(simple_model)
+
+ # Test memory cleanup
+ optimizer.cleanup_memory()
+
+ # Should not raise errors
+
+
+class TestOptimizerFunctions:
+ """Test optimizer convenience functions."""
+
+ @pytest.fixture
+ def simple_model(self):
+ """Create a simple test model."""
+ return nn.Sequential(
+ nn.Linear(10, 20),
+ nn.ReLU(),
+ nn.Linear(20, 5)
+ )
+
+ @pytest.fixture
+ def sample_input(self):
+ """Create sample input tensor."""
+ return torch.randn(1, 10)
+
+ @pytest.mark.skipif(convert_to_tensorrt is None, reason="TensorRT function not available")
+ def test_convert_to_tensorrt_function(self, simple_model, sample_input):
+ """Test convert_to_tensorrt convenience function."""
+ # Only run test if TensorRT is actually available
+ if convert_to_tensorrt is None:
+ pytest.skip("TensorRT not available")
+
+ with patch('framework.optimizers.tensorrt_optimizer.TensorRTOptimizer') as mock_optimizer_class:
+ mock_optimizer = Mock()
+ mock_optimizer.optimize.return_value = simple_model
+ mock_optimizer_class.return_value = mock_optimizer
+
+ result = convert_to_tensorrt(simple_model, example_inputs=[sample_input])
+
+ mock_optimizer_class.assert_called_once()
+ mock_optimizer.optimize.assert_called_once()
+ assert result is not None
+
+ @pytest.mark.skipif(convert_to_onnx is None, reason="ONNX function not available")
+ def test_convert_to_onnx_function(self, simple_model, sample_input, temp_model_dir):
+ """Test convert_to_onnx convenience function."""
+ # Only run test if ONNX is actually available
+ if convert_to_onnx is None:
+ pytest.skip("ONNX not available")
+
+ with patch('framework.optimizers.onnx_optimizer.ONNXOptimizer') as mock_optimizer_class:
+ mock_optimizer = Mock()
+ mock_optimizer.optimize.return_value = simple_model
+ mock_optimizer_class.return_value = mock_optimizer
+
+ output_path = temp_model_dir / "model.onnx"
+ result = convert_to_onnx(
+ simple_model,
+ example_inputs=[sample_input],
+ output_path=str(output_path)
+ )
+
+ mock_optimizer_class.assert_called_once()
+ mock_optimizer.optimize.assert_called_once()
+ assert result is not None
+
+ @pytest.mark.skipif(quantize_model is None, reason="Quantization function not available")
+ def test_quantize_model_function(self, simple_model):
+ """Test quantize_model convenience function."""
+ with patch('framework.optimizers.QuantizationOptimizer') as mock_optimizer_class:
+ mock_optimizer = Mock()
+ mock_optimizer.optimize.return_value = simple_model
+ mock_optimizer_class.return_value = mock_optimizer
+
+ result = quantize_model(simple_model, quantization_type="dynamic")
+
+ mock_optimizer_class.assert_called_once()
+ mock_optimizer.optimize.assert_called_once()
+ assert result is not None
+
+ @pytest.mark.skipif(jit_compile_model is None, reason="JIT function not available")
+ def test_jit_compile_model_function(self, simple_model, sample_input):
+ """Test jit_compile_model convenience function."""
+ if jit_compile_model is None:
+ pytest.skip("JIT not available")
+
+ with patch('framework.optimizers.jit_optimizer.JITOptimizer') as mock_optimizer_class:
+ mock_optimizer = Mock()
+ mock_optimizer.optimize.return_value = simple_model
+ mock_optimizer_class.return_value = mock_optimizer
+
+ result = jit_compile_model(simple_model, method="trace", example_inputs=[sample_input])
+
+ mock_optimizer_class.assert_called_once()
+ mock_optimizer.optimize.assert_called_once()
+ assert result is not None
+
+ @pytest.mark.skipif(enable_cuda_optimizations is None, reason="CUDA function not available")
+ def test_enable_cuda_optimizations_function(self):
+ """Test enable_cuda_optimizations convenience function."""
+ if enable_cuda_optimizations is None:
+ pytest.skip("CUDA optimizations not available")
+
+ with patch('framework.optimizers.cuda_optimizer.CUDAOptimizer') as mock_optimizer_class:
+ mock_optimizer = Mock()
+ mock_optimizer_class.return_value = mock_optimizer
+
+ enable_cuda_optimizations()
+
+ mock_optimizer_class.assert_called_once()
+
+
+class TestOptimizerIntegration:
+ """Integration tests for optimizer functionality."""
+
+ @pytest.fixture
+ def complex_model(self):
+ """Create a more complex test model."""
+ return nn.Sequential(
+ nn.Conv2d(3, 32, 3, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(2),
+ nn.Conv2d(32, 64, 3, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(2),
+ nn.Flatten(),
+ nn.Linear(64 * 56 * 56, 128),
+ nn.ReLU(),
+ nn.Linear(128, 10)
+ )
+
+ @pytest.fixture
+ def image_input(self):
+ """Create sample image input."""
+ return torch.randn(1, 3, 224, 224)
+
+ def test_multiple_optimizations(self, complex_model, image_input):
+ """Test applying multiple optimizations sequentially."""
+ original_model = complex_model
+ optimized_model = original_model
+
+ # Apply available optimizations
+ optimizers_to_test = [
+ (JITOptimizer, {"method": "script"}),
+ (QuantizationOptimizer, {"quantization_type": "dynamic"}),
+ ]
+
+ for optimizer_class, kwargs in optimizers_to_test:
+ if optimizer_class is not None:
+ try:
+ optimizer = optimizer_class()
+ if hasattr(optimizer, 'is_available') and optimizer.is_available():
+ optimized_model = optimizer.optimize(optimized_model, **kwargs)
+ except Exception as e:
+ # Skip if optimization fails (common in test environments)
+ continue
+
+ # Should have a model (optimized or original)
+ assert optimized_model is not None
+
+ def test_optimization_pipeline(self, complex_model, image_input):
+ """Test complete optimization pipeline."""
+ model = complex_model
+
+ # Mock optimization pipeline
+ class MockOptimizationPipeline:
+ def __init__(self):
+ self.optimizers = []
+
+ # Add available optimizers
+ if JITOptimizer:
+ self.optimizers.append(("JIT", JITOptimizer()))
+ if QuantizationOptimizer:
+ self.optimizers.append(("Quantization", QuantizationOptimizer()))
+
+ def optimize(self, model):
+ optimized_model = model
+ results = {}
+
+ for name, optimizer in self.optimizers:
+ try:
+ # Mock successful optimization
+ results[name] = "success"
+ # In real implementation, would apply optimization
+ except Exception as e:
+ results[name] = f"failed: {e}"
+
+ return optimized_model, results
+
+ pipeline = MockOptimizationPipeline()
+ optimized_model, results = pipeline.optimize(model)
+
+ assert optimized_model is not None
+ assert isinstance(results, dict)
+
+
+class TestOptimizerErrorHandling:
+ """Test error handling in optimizers."""
+
+ def test_unavailable_optimizer_handling(self):
+ """Test handling of unavailable optimizers."""
+ # Mock unavailable optimizer
+ class UnavailableOptimizer:
+ def is_available(self):
+ return False
+
+ def optimize(self, model, **kwargs):
+ raise RuntimeError("Optimizer not available")
+
+ optimizer = UnavailableOptimizer()
+
+ # Should detect unavailability
+ assert not optimizer.is_available()
+
+ # Should raise appropriate error
+ with pytest.raises(RuntimeError):
+ optimizer.optimize(nn.Linear(10, 5))
+
+ def test_optimization_failure_recovery(self):
+ """Test recovery from optimization failures."""
+ class FailingOptimizer:
+ def optimize(self, model, **kwargs):
+ raise RuntimeError("Optimization failed")
+
+ optimizer = FailingOptimizer()
+ original_model = nn.Linear(10, 5)
+
+ # Should be able to catch and handle failure
+ try:
+ optimized_model = optimizer.optimize(original_model)
+ except RuntimeError:
+ # Fallback to original model
+ optimized_model = original_model
+
+ assert optimized_model == original_model
+
+ def test_invalid_optimization_parameters(self):
+ """Test handling of invalid optimization parameters."""
+ if QuantizationOptimizer:
+ optimizer = QuantizationOptimizer()
+ model = nn.Linear(10, 5)
+
+ # Test with invalid parameters - should either work or raise clear error
+ try:
+ result = optimizer.optimize(model, quantization_type="invalid_type")
+ # If it doesn't raise, should return some result
+ assert result is not None
+ except (ValueError, TypeError, RuntimeError):
+ # Expected for invalid parameters
+ pass
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
new file mode 100644
index 0000000..c32a31d
--- /dev/null
+++ b/tests/unit/test_utils.py
@@ -0,0 +1,489 @@
+"""Tests for utility modules."""
+
+import pytest
+import time
+from unittest.mock import Mock, patch
+from collections import deque
+
+from framework.utils.monitoring import (
+ PerformanceMonitor, MetricsCollector, PerformanceStats, PerformanceMetrics,
+ get_performance_monitor, get_metrics_collector, Metric, MetricType
+)
+
+
+class TestPerformanceMetrics:
+ """Test performance metrics data structure."""
+
+ def test_performance_metrics_creation(self):
+ """Test creating performance metrics."""
+ metrics = PerformanceMetrics(
+ inference_time=0.025,
+ preprocessing_time=0.002,
+ postprocessing_time=0.001,
+ total_time=0.028,
+ throughput=35.7,
+ memory_usage=1024*1024*50, # 50MB
+ gpu_utilization=75.5
+ )
+
+ assert metrics.inference_time == 0.025
+ assert metrics.preprocessing_time == 0.002
+ assert metrics.postprocessing_time == 0.001
+ assert metrics.total_time == 0.028
+ assert metrics.throughput == 35.7
+ assert metrics.memory_usage == 1024*1024*50
+ assert metrics.gpu_utilization == 75.5
+
+ def test_performance_metrics_defaults(self):
+ """Test performance metrics with defaults."""
+ metrics = PerformanceMetrics()
+
+ assert metrics.inference_time == 0.0
+ assert metrics.preprocessing_time == 0.0
+ assert metrics.postprocessing_time == 0.0
+ assert metrics.total_time == 0.0
+ assert metrics.throughput == 0.0
+ assert metrics.memory_usage == 0
+ assert metrics.gpu_utilization is None
+
+
+class TestPerformanceMonitor:
+ """Test performance monitoring functionality."""
+
+ @pytest.fixture
+ def performance_monitor(self):
+ """Create performance monitor instance."""
+ return PerformanceMonitor()
+
+ def test_monitor_initialization(self, performance_monitor):
+ """Test monitor initialization."""
+ assert performance_monitor.request_times == deque(maxlen=1000)
+ assert performance_monitor.total_requests == 0
+ assert performance_monitor.start_time is not None
+
+ def test_start_request_timing(self, performance_monitor):
+ """Test starting request timing."""
+ request_id = "test_request_1"
+ performance_monitor.start_request(request_id)
+
+ assert request_id in performance_monitor.active_requests
+ assert performance_monitor.active_requests[request_id] > 0
+
+ def test_end_request_timing(self, performance_monitor):
+ """Test ending request timing."""
+ request_id = "test_request_1"
+
+ # Start request
+ performance_monitor.start_request(request_id)
+ time.sleep(0.01) # Small delay
+
+ # End request
+ elapsed = performance_monitor.end_request(request_id)
+
+ assert elapsed > 0
+ assert request_id not in performance_monitor.active_requests
+ assert len(performance_monitor.request_times) > 0
+ assert performance_monitor.total_requests == 1
+
+ def test_end_request_not_started(self, performance_monitor):
+ """Test ending request that wasn't started."""
+ elapsed = performance_monitor.end_request("nonexistent_request")
+ assert elapsed == 0.0
+
+ def test_get_current_stats(self, performance_monitor):
+ """Test getting current statistics."""
+ # Record some requests
+ for i in range(5):
+ request_id = f"request_{i}"
+ performance_monitor.start_request(request_id)
+ time.sleep(0.001)
+ performance_monitor.end_request(request_id)
+
+ stats = performance_monitor.get_current_stats()
+
+ assert isinstance(stats, dict)
+ assert "total_requests" in stats
+ assert "avg_request_time" in stats
+ assert "active_requests" in stats
+ assert "uptime" in stats
+
+ assert stats["total_requests"] == 5
+ assert stats["avg_request_time"] > 0
+ assert stats["uptime"] > 0
+
+ def test_get_performance_summary(self, performance_monitor):
+ """Test getting performance summary."""
+ # Record requests with different timing patterns
+ fast_times = [0.01, 0.015, 0.012, 0.018, 0.014]
+ for i, duration in enumerate(fast_times):
+ request_id = f"fast_request_{i}"
+ performance_monitor.start_request(request_id)
+ time.sleep(duration)
+ performance_monitor.end_request(request_id)
+
+ summary = performance_monitor.get_performance_summary()
+
+ assert isinstance(summary, dict)
+ assert "total_requests" in summary
+ assert "window_seconds" in summary
+ assert "timestamp" in summary
+ assert "metrics" in summary
+
+ assert summary["total_requests"] == 5
+ assert summary["window_seconds"] > 0
+ assert summary["timestamp"] > 0
+
+ def test_record_batch_metrics(self, performance_monitor):
+ """Test recording batch metrics."""
+ metrics = PerformanceMetrics(
+ inference_time=0.05,
+ preprocessing_time=0.002,
+ postprocessing_time=0.001,
+ total_time=0.053,
+ throughput=18.9,
+ memory_usage=1024*1024*100,
+ gpu_utilization=80.0
+ )
+
+ performance_monitor.record_batch_metrics(4, metrics)
+
+ # Check that metrics were recorded
+ assert len(performance_monitor.batch_metrics) > 0
+ last_batch = performance_monitor.batch_metrics[-1]
+ assert last_batch["batch_size"] == 4
+ assert last_batch["metrics"] == metrics
+
+ def test_get_batch_performance(self, performance_monitor):
+ """Test getting batch performance statistics."""
+ # Record multiple batch metrics
+ for i in range(3):
+ metrics = PerformanceMetrics(
+ inference_time=0.02 + i * 0.01,
+ total_time=0.025 + i * 0.01,
+ throughput=40.0 - i * 5.0
+ )
+ performance_monitor.record_batch_metrics(2 + i, metrics)
+
+ batch_perf = performance_monitor.get_batch_performance()
+
+ assert isinstance(batch_perf, dict)
+ assert "total_batches" in batch_perf
+ assert "average_batch_size" in batch_perf
+ assert "average_inference_time" in batch_perf
+ assert "average_throughput" in batch_perf
+
+ assert batch_perf["total_batches"] == 3
+ assert batch_perf["average_batch_size"] > 2.0
+
+ def test_reset_statistics(self, performance_monitor):
+ """Test resetting statistics."""
+ # Record some data
+ performance_monitor.start_request("test")
+ performance_monitor.end_request("test")
+
+ assert performance_monitor.total_requests > 0
+
+ # Reset
+ performance_monitor.reset()
+
+ assert performance_monitor.total_requests == 0
+ assert len(performance_monitor.request_times) == 0
+ assert len(performance_monitor.active_requests) == 0
+ assert len(performance_monitor.batch_metrics) == 0
+
+ def test_context_manager(self, performance_monitor):
+ """Test using monitor as context manager."""
+ with performance_monitor.time_request("context_test") as request_id:
+ assert request_id == "context_test"
+ assert "context_test" in performance_monitor.active_requests
+ time.sleep(0.001)
+
+ # Should automatically end timing
+ assert "context_test" not in performance_monitor.active_requests
+ assert performance_monitor.total_requests == 1
+
+
+class TestMetricsCollector:
+ """Test metrics collection functionality."""
+
+ @pytest.fixture
+ def metrics_collector(self):
+ """Create metrics collector instance."""
+ return MetricsCollector()
+
+ def test_collector_initialization(self, metrics_collector):
+ """Test collector initialization."""
+ assert len(metrics_collector.get_summary()) == 0
+ assert metrics_collector.max_history > 0
+
+ def test_record_counter_metric(self, metrics_collector):
+ """Test recording counter metrics."""
+ metrics_collector.record_counter("requests_total", 1)
+ metrics_collector.record_counter("requests_total", 1)
+ metrics_collector.record_counter("requests_total", 3)
+
+ # Check if the metric was recorded
+ summary = metrics_collector.get_summary()
+ assert "requests_total" in summary
+ assert summary["requests_total"]["count"] == 3
+
+ def test_record_gauge_metric(self, metrics_collector):
+ """Test recording gauge metrics."""
+ metrics_collector.record_gauge("memory_usage", 100)
+ metrics_collector.record_gauge("memory_usage", 150)
+ metrics_collector.record_gauge("memory_usage", 120)
+
+ # Check if the metrics were recorded
+ summary = metrics_collector.get_summary()
+ assert "memory_usage" in summary
+ assert summary["memory_usage"]["count"] == 3
+ assert summary["memory_usage"]["latest"] == 120
+
+ def test_record_histogram_metric(self, metrics_collector):
+ """Test recording histogram metrics."""
+ response_times = [0.01, 0.02, 0.015, 0.03, 0.012, 0.025]
+
+ for rt in response_times:
+ metrics_collector.record_timer("response_time", rt)
+
+ # Check if the metrics were recorded
+ summary = metrics_collector.get_summary()
+ assert "response_time" in summary
+ assert summary["response_time"]["count"] == len(response_times)
+
+ def test_record_with_labels(self, metrics_collector):
+ """Test recording metrics with labels."""
+ # Record metrics with different labels
+ metrics_collector.record(
+ "model_requests", 5,
+ labels={"model": "bert", "version": "v1"}
+ )
+ metrics_collector.record(
+ "model_requests", 3,
+ labels={"model": "gpt", "version": "v2"}
+ )
+
+ # Should store separately by labels
+ metrics = metrics_collector.get_all_metrics()
+ model_requests = metrics["model_requests"]
+
+ assert len(model_requests) == 2
+ assert any(item["value"] == 5 for item in model_requests)
+ assert any(item["value"] == 3 for item in model_requests)
+
+ def test_get_metric_nonexistent(self, metrics_collector):
+ """Test getting non-existent metric."""
+ value = metrics_collector.get_metric("nonexistent_metric")
+ assert value is None
+
+ def test_get_all_metrics(self, metrics_collector):
+ """Test getting all metrics."""
+ # Record various metrics
+ metrics_collector.record("counter_metric", 10, metric_type="counter")
+ metrics_collector.record("gauge_metric", 50, metric_type="gauge")
+ metrics_collector.record("histogram_metric", 0.1, metric_type="histogram")
+
+ all_metrics = metrics_collector.get_all_metrics()
+
+ assert isinstance(all_metrics, dict)
+ assert "counter_metric" in all_metrics
+ assert "gauge_metric" in all_metrics
+ assert "histogram_metric" in all_metrics
+
+ def test_calculate_percentiles(self, metrics_collector):
+ """Test percentile calculations."""
+ # Record histogram data
+ values = list(range(1, 101)) # 1 to 100
+ for value in values:
+ metrics_collector.record("test_histogram", value, metric_type="histogram")
+
+ percentiles = metrics_collector.calculate_percentiles("test_histogram", [50, 90, 95, 99])
+
+ assert isinstance(percentiles, dict)
+ assert 50 in percentiles
+ assert 90 in percentiles
+ assert 95 in percentiles
+ assert 99 in percentiles
+
+ # Check approximate correctness
+ assert 45 <= percentiles[50] <= 55 # 50th percentile ~50
+ assert 85 <= percentiles[90] <= 95 # 90th percentile ~90
+
+ def test_calculate_percentiles_nonexistent(self, metrics_collector):
+ """Test percentile calculation for non-existent metric."""
+ percentiles = metrics_collector.calculate_percentiles("nonexistent", [50, 90])
+ assert percentiles == {}
+
+ def test_export_metrics(self, metrics_collector):
+ """Test exporting metrics."""
+ # Record various metrics
+ metrics_collector.record("requests", 100, metric_type="counter")
+ metrics_collector.record("cpu_usage", 75.5, metric_type="gauge")
+
+ for i in range(10):
+ metrics_collector.record("latency", 0.01 * (i + 1), metric_type="histogram")
+
+ exported = metrics_collector.export_metrics()
+
+ assert isinstance(exported, dict)
+ assert "timestamp" in exported
+ assert "metrics" in exported
+ assert "collection_duration" in exported
+
+ metrics = exported["metrics"]
+ assert "requests" in metrics
+ assert "cpu_usage" in metrics
+ assert "latency" in metrics
+
+ def test_reset_metrics(self, metrics_collector):
+ """Test resetting all metrics."""
+ # Record some metrics
+ metrics_collector.record("test_counter", 5)
+ metrics_collector.record("test_gauge", 10)
+
+ assert len(metrics_collector.metrics) > 0
+
+ # Reset
+ metrics_collector.reset()
+
+ assert len(metrics_collector.metrics) == 0
+
+
+class TestMonitoringIntegration:
+ """Integration tests for monitoring components."""
+
+ def test_performance_monitor_metrics_collector_integration(self):
+ """Test integration between performance monitor and metrics collector."""
+ monitor = PerformanceMonitor()
+ collector = MetricsCollector()
+
+ # Record some performance data
+ for i in range(10):
+ request_id = f"request_{i}"
+ monitor.start_request(request_id)
+ time.sleep(0.001)
+ elapsed = monitor.end_request(request_id)
+
+ # Record in metrics collector
+ collector.record("request_time", elapsed, metric_type="histogram")
+ collector.record("requests_total", 1, metric_type="counter")
+
+ # Get performance stats
+ perf_stats = monitor.get_current_stats()
+
+ # Get metrics
+ total_requests = collector.get_metric("requests_total")
+ request_times = collector.get_metric("request_time")
+
+ assert perf_stats["total_requests"] == 10
+ assert total_requests == 10
+ assert len(request_times) == 10
+
+ def test_monitoring_with_context_managers(self):
+ """Test monitoring with context managers."""
+ monitor = PerformanceMonitor()
+ collector = MetricsCollector()
+
+ # Use context managers for timing
+ for i in range(5):
+ with monitor.time_request(f"request_{i}"):
+ time.sleep(0.001)
+ # Simulate work
+ collector.record("work_done", 1, metric_type="counter")
+
+ stats = monitor.get_current_stats()
+ work_done = collector.get_metric("work_done")
+
+ assert stats["total_requests"] == 5
+ assert work_done == 5
+
+
+class TestGlobalMonitoringInstances:
+ """Test global monitoring instances."""
+
+ def test_get_performance_monitor_singleton(self):
+ """Test global performance monitor singleton."""
+ monitor1 = get_performance_monitor()
+ monitor2 = get_performance_monitor()
+
+ # Should be the same instance
+ assert monitor1 is monitor2
+ assert isinstance(monitor1, PerformanceMonitor)
+
+ def test_get_metrics_collector_singleton(self):
+ """Test global metrics collector singleton."""
+ collector1 = get_metrics_collector()
+ collector2 = get_metrics_collector()
+
+ # Should be the same instance
+ assert collector1 is collector2
+ assert isinstance(collector1, MetricsCollector)
+
+ def test_global_instances_independence(self):
+ """Test that global instances are independent."""
+ monitor = get_performance_monitor()
+ collector = get_metrics_collector()
+
+ # They should be different objects
+ assert monitor is not collector
+
+ # Each should function independently
+ monitor.start_request("test")
+ collector.record("test_metric", 1)
+
+ # Both should have recorded data
+ assert len(monitor.active_requests) == 1
+ assert len(collector.metrics) > 0
+
+
+class TestMonitoringErrorHandling:
+ """Test error handling in monitoring components."""
+
+ def test_performance_monitor_error_handling(self):
+ """Test performance monitor error handling."""
+ monitor = PerformanceMonitor()
+
+ # Test with None request ID
+ monitor.start_request(None) # Should handle gracefully
+ elapsed = monitor.end_request(None)
+ assert elapsed == 0.0
+
+ # Test ending request multiple times
+ monitor.start_request("test")
+ monitor.end_request("test")
+ elapsed = monitor.end_request("test") # Already ended
+ assert elapsed == 0.0
+
+ def test_metrics_collector_error_handling(self):
+ """Test metrics collector error handling."""
+ collector = MetricsCollector()
+
+ # Test with None values
+ collector.record(None, 10) # Should handle gracefully
+ collector.record("test", None) # Should handle gracefully
+
+ # Test with invalid percentiles
+ percentiles = collector.calculate_percentiles("nonexistent", [150, -10])
+ assert percentiles == {}
+
+ def test_monitoring_with_exceptions(self):
+ """Test monitoring when exceptions occur."""
+ monitor = PerformanceMonitor()
+ collector = MetricsCollector()
+
+ # Simulate work that raises exception
+ try:
+ with monitor.time_request("failing_request"):
+ collector.record("attempts", 1, metric_type="counter")
+ raise ValueError("Simulated error")
+ except ValueError:
+ collector.record("errors", 1, metric_type="counter")
+
+ # Monitoring should still work
+ stats = monitor.get_current_stats()
+ attempts = collector.get_metric("attempts")
+ errors = collector.get_metric("errors")
+
+ assert stats["total_requests"] == 1
+ assert attempts == 1
+ assert errors == 1
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..12a86ff
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,275 @@
+# Tox configuration for torch-inference framework
+# Automates testing across multiple Python versions and environments
+
+[tox]
+minversion = 4.0
+envlist =
+ py{310,311,312}
+ coverage
+ lint
+ type-check
+ security
+ docs
+isolated_build = True
+skip_missing_interpreters = True
+
+[testenv]
+# Base test environment configuration
+deps =
+ pytest>=7.0.0
+ pytest-asyncio>=0.21.0
+ pytest-timeout
+ torch>=2.6.0
+ torchvision>=0.20.0
+ numpy>=2.1.0
+ pyyaml>=6.0.0
+ pillow>=11.0.0
+ requests>=2.32.0
+
+extras = dev
+
+commands =
+ pytest {posargs:tests/}
+
+# Environment variables
+setenv =
+ PYTHONPATH = {toxinidir}
+ ENVIRONMENT = test
+ DEVICE = cpu
+ LOG_LEVEL = INFO
+
+# Parallel execution
+parallel_show_output = True
+
+[testenv:coverage]
+# Coverage testing environment
+deps =
+ {[testenv]deps}
+ pytest-cov
+ coverage[toml]
+
+commands =
+ pytest --cov=framework --cov-report=html --cov-report=xml --cov-report=term-missing --cov-fail-under=80 {posargs:tests/}
+
+[testenv:unit]
+# Unit tests only
+commands =
+ pytest {posargs:tests/unit/}
+
+[testenv:integration]
+# Integration tests only
+commands =
+ pytest {posargs:tests/integration/}
+
+[testenv:smoke]
+# Quick smoke tests
+commands =
+ pytest -m smoke {posargs}
+
+[testenv:gpu]
+# GPU-specific tests (requires CUDA)
+deps =
+ {[testenv]deps}
+
+commands =
+ pytest -m gpu {posargs:tests/}
+
+setenv =
+ {[testenv]setenv}
+ DEVICE = cuda
+
+[testenv:tensorrt]
+# TensorRT-specific tests
+deps =
+ {[testenv]deps}
+ tensorrt>=10.7.0
+
+commands =
+ pytest -m tensorrt {posargs:tests/}
+
+[testenv:onnx]
+# ONNX-specific tests
+deps =
+ {[testenv]deps}
+ onnx>=1.14.0
+ onnxruntime>=1.16.0
+
+commands =
+ pytest -m onnx {posargs:tests/}
+
+[testenv:enterprise]
+# Enterprise feature tests
+commands =
+ pytest -m enterprise {posargs:tests/}
+
+[testenv:benchmark]
+# Performance benchmark tests
+deps =
+ {[testenv]deps}
+ pytest-benchmark
+
+commands =
+ pytest -m benchmark --benchmark-only {posargs:tests/}
+
+[testenv:lint]
+# Code linting and formatting checks
+deps =
+ black>=23.0.0
+ ruff>=0.1.0
+ isort>=5.12.0
+
+commands =
+ black --check --diff .
+ ruff check .
+ isort --check-only --diff .
+
+[testenv:format]
+# Code formatting
+deps =
+ black>=23.0.0
+ ruff>=0.1.0
+ isort>=5.12.0
+
+commands =
+ black .
+ ruff check --fix .
+ isort .
+
+[testenv:type-check]
+# Type checking with mypy
+deps =
+ mypy>=1.0.0
+ types-requests
+ types-PyYAML
+ types-Pillow
+
+commands =
+ mypy framework
+
+[testenv:security]
+# Security scanning
+deps =
+ bandit[toml]>=1.7.0
+ safety>=2.0.0
+
+commands =
+ bandit -r framework -f json -o bandit-report.json
+ bandit -r framework
+ safety check --json --output safety-report.json
+ safety check
+
+[testenv:docs]
+# Documentation building and testing
+deps =
+ mkdocs>=1.5.0
+ mkdocs-material>=9.0.0
+ mkdocstrings[python]>=0.24.0
+
+commands =
+ mkdocs build --strict
+
+[testenv:clean]
+# Clean up generated files
+deps =
+allowlist_externals =
+ rm
+ find
+
+commands =
+ rm -rf .pytest_cache
+ rm -rf htmlcov
+ rm -rf .coverage
+ rm -rf .tox
+ rm -rf build
+ rm -rf dist
+ rm -rf *.egg-info
+ find . -name __pycache__ -type d -exec rm -rf {} +
+ find . -name "*.pyc" -delete
+
+# Pytest configuration for tox
+[testenv:pytest]
+deps = {[testenv]deps}
+commands = pytest {posargs}
+
+# Performance testing with different configurations
+[testenv:perf-cpu]
+setenv =
+ {[testenv]setenv}
+ DEVICE = cpu
+
+commands =
+ pytest -m benchmark --benchmark-only --benchmark-sort=mean {posargs:tests/}
+
+[testenv:perf-gpu]
+setenv =
+ {[testenv]setenv}
+ DEVICE = cuda
+
+commands =
+ pytest -m benchmark --benchmark-only --benchmark-sort=mean {posargs:tests/}
+
+# Development environment
+[testenv:dev]
+# Development environment with all dependencies
+deps =
+ {[testenv]deps}
+ pytest-cov
+ pytest-benchmark
+ pytest-xdist
+ pytest-html
+ pre-commit
+
+commands =
+ pre-commit install
+ pytest {posargs}
+
+# CI/CD specific configurations
+[testenv:ci]
+# CI environment configuration
+deps =
+ {[testenv]deps}
+ pytest-cov
+ pytest-xdist
+
+commands =
+ pytest -n auto --cov=framework --cov-report=xml --cov-report=term --junitxml=junit.xml {posargs:tests/}
+
+# Docker testing
+[testenv:docker]
+# Docker-based testing
+platform = linux
+deps = {[testenv]deps}
+
+commands =
+ pytest {posargs:tests/}
+
+# Memory profiling
+[testenv:memory]
+# Memory usage profiling
+deps =
+ {[testenv]deps}
+ pytest-memray
+
+commands =
+ pytest --memray {posargs:tests/}
+
+# Test data management
+[testenv:test-data]
+# Download and manage test data/models
+allowlist_externals =
+ python
+
+commands =
+ python tests/models/create_test_models.py
+
+# Report generation
+[testenv:report]
+# Generate comprehensive test reports
+deps =
+ {[testenv]deps}
+ pytest-cov
+ pytest-html
+ pytest-benchmark
+
+commands =
+ pytest --cov=framework --cov-report=html --html=report.html --self-contained-html --benchmark-only --benchmark-json=benchmark.json {posargs:tests/}
diff --git a/uv-requirements.txt b/uv-requirements.txt
new file mode 100644
index 0000000..4baffdf
--- /dev/null
+++ b/uv-requirements.txt
@@ -0,0 +1,1941 @@
+# This file was autogenerated by uv via the following command:
+# uv export --format requirements-txt --output-file uv-requirements.txt
+-e .
+aiohappyeyeballs==2.6.1 \
+ --hash=sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558 \
+ --hash=sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8
+ # via aiohttp
+aiohttp==3.12.15 \
+ --hash=sha256:010cc9bbd06db80fe234d9003f67e97a10fe003bfbedb40da7d71c1008eda0fe \
+ --hash=sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645 \
+ --hash=sha256:098e92835b8119b54c693f2f88a1dec690e20798ca5f5fe5f0520245253ee0af \
+ --hash=sha256:1004e67962efabbaf3f03b11b4c43b834081c9e3f9b32b16a7d97d4708a9abe6 \
+ --hash=sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84 \
+ --hash=sha256:2776c7ec89c54a47029940177e75c8c07c29c66f73464784971d6a81904ce9d1 \
+ --hash=sha256:2abbb216a1d3a2fe86dbd2edce20cdc5e9ad0be6378455b05ec7f77361b3ab50 \
+ --hash=sha256:2c7d81a277fa78b2203ab626ced1487420e8c11a8e373707ab72d189fcdad20a \
+ --hash=sha256:2ce13fcfb0bb2f259fb42106cdc63fa5515fb85b7e87177267d89a771a660b79 \
+ --hash=sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c \
+ --hash=sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd \
+ --hash=sha256:3b6f0af863cf17e6222b1735a756d664159e58855da99cfe965134a3ff63b0b0 \
+ --hash=sha256:3beb14f053222b391bf9cf92ae82e0171067cc9c8f52453a0f1ec7c37df12a77 \
+ --hash=sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c \
+ --hash=sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab \
+ --hash=sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4 \
+ --hash=sha256:3f9d7c55b41ed687b9d7165b17672340187f87a773c98236c987f08c858145a9 \
+ --hash=sha256:40b3fee496a47c3b4a39a731954c06f0bd9bd3e8258c059a4beb76ac23f8e421 \
+ --hash=sha256:421da6fd326460517873274875c6c5a18ff225b40da2616083c5a34a7570b685 \
+ --hash=sha256:4420cf9d179ec8dfe4be10e7d0fe47d6d606485512ea2265b0d8c5113372771b \
+ --hash=sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693 \
+ --hash=sha256:4c39e87afe48aa3e814cac5f535bc6199180a53e38d3f51c5e2530f5aa4ec58c \
+ --hash=sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2 \
+ --hash=sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519 \
+ --hash=sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d \
+ --hash=sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea \
+ --hash=sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b \
+ --hash=sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0 \
+ --hash=sha256:6443cca89553b7a5485331bc9bedb2342b08d073fa10b8c7d1c60579c4a7b9bd \
+ --hash=sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb \
+ --hash=sha256:6c5f40ec615e5264f44b4282ee27628cea221fcad52f27405b80abb346d9f3f8 \
+ --hash=sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f \
+ --hash=sha256:74dad41b3458dbb0511e760fb355bb0b6689e0630de8a22b1b62a98777136e16 \
+ --hash=sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64 \
+ --hash=sha256:79b26fe467219add81d5e47b4a4ba0f2394e8b7c7c3198ed36609f9ba161aecb \
+ --hash=sha256:7c7dd29c7b5bda137464dc9bfc738d7ceea46ff70309859ffde8c022e9b08ba7 \
+ --hash=sha256:7fbc8a7c410bb3ad5d595bb7118147dfbb6449d862cc1125cf8867cb337e8728 \
+ --hash=sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7 \
+ --hash=sha256:83603f881e11f0f710f8e2327817c82e79431ec976448839f3cd05d7afe8f830 \
+ --hash=sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d \
+ --hash=sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d \
+ --hash=sha256:8faa08fcc2e411f7ab91d1541d9d597d3a90e9004180edb2072238c085eac8c2 \
+ --hash=sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9 \
+ --hash=sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315 \
+ --hash=sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d \
+ --hash=sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd \
+ --hash=sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d \
+ --hash=sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51 \
+ --hash=sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3 \
+ --hash=sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34 \
+ --hash=sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461 \
+ --hash=sha256:b5b7fe4972d48a4da367043b8e023fb70a04d1490aa7d68800e465d1b97e493b \
+ --hash=sha256:b6fc902bff74d9b1879ad55f5404153e2b33a82e72a95c89cec5eb6cc9e92fbc \
+ --hash=sha256:b761bac1192ef24e16706d761aefcb581438b34b13a2f069a6d343ec8fb693a5 \
+ --hash=sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7 \
+ --hash=sha256:bc4fbc61bb3548d3b482f9ac7ddd0f18c67e4225aaa4e8552b9f1ac7e6bda9e5 \
+ --hash=sha256:ced339d7c9b5030abad5854aa5413a77565e5b6e6248ff927d3e174baf3badf7 \
+ --hash=sha256:d3ce17ce0220383a0f9ea07175eeaa6aa13ae5a41f30bc61d84df17f0e9b1117 \
+ --hash=sha256:d5f1b4ce5bc528a6ee38dbf5f39bbf11dd127048726323b72b8e85769319ffc4 \
+ --hash=sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1 \
+ --hash=sha256:db71ce547012a5420a39c1b744d485cfb823564d01d5d20805977f5ea1345676 \
+ --hash=sha256:e153e8adacfe2af562861b72f8bc47f8a5c08e010ac94eebbe33dc21d677cd5b \
+ --hash=sha256:edd533a07da85baa4b423ee8839e3e91681c7bfa19b04260a469ee94b778bf6d \
+ --hash=sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d \
+ --hash=sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444 \
+ --hash=sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0 \
+ --hash=sha256:fc49c4de44977aa8601a00edbf157e9a421f227aa7eb477d9e3df48343311065 \
+ --hash=sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545 \
+ --hash=sha256:fe086edf38b2222328cdf89af0dde2439ee173b8ad7cb659b4e4c6f385b2be3d
+ # via torch-inference-optimized
+aiosignal==1.4.0 \
+ --hash=sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e \
+ --hash=sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7
+ # via aiohttp
+annotated-types==0.7.0 \
+ --hash=sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53 \
+ --hash=sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89
+ # via pydantic
+anyio==4.10.0 \
+ --hash=sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6 \
+ --hash=sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1
+ # via
+ # starlette
+ # torch-inference-optimized
+async-timeout==5.0.1 ; python_full_version < '3.11' \
+ --hash=sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c \
+ --hash=sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3
+ # via aiohttp
+attrs==25.3.0 \
+ --hash=sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3 \
+ --hash=sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b
+ # via aiohttp
+authlib==1.6.1 \
+ --hash=sha256:4dffdbb1460ba6ec8c17981a4c67af7d8af131231b5a36a88a1e8c80c111cdfd \
+ --hash=sha256:e9d2031c34c6309373ab845afc24168fe9e93dc52d252631f52642f21f5ed06e
+ # via safety
+backports-asyncio-runner==1.2.0 ; python_full_version < '3.11' \
+ --hash=sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5 \
+ --hash=sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162
+ # via pytest-asyncio
+backports-datetime-fromisoformat==2.0.3 ; python_full_version < '3.11' \
+ --hash=sha256:24a4da5ab3aa0cc293dc0662a0c6d1da1a011dc1edcbc3122a288cfed13a0b45 \
+ --hash=sha256:2df98ef1b76f5a58bb493dda552259ba60c3a37557d848e039524203951c9f06 \
+ --hash=sha256:39d57ea50aa5a524bb239688adc1d1d824c31b6094ebd39aa164d6cadb85de22 \
+ --hash=sha256:43e2d648e150777e13bbc2549cc960373e37bf65bd8a5d2e0cef40e16e5d8dd0 \
+ --hash=sha256:44c497a71f80cd2bcfc26faae8857cf8e79388e3d5fbf79d2354b8c360547d58 \
+ --hash=sha256:4ce6326fd86d5bae37813c7bf1543bae9e4c215ec6f5afe4c518be2635e2e005 \
+ --hash=sha256:4cf9c0a985d68476c1cabd6385c691201dda2337d7453fb4da9679ce9f23f4e7 \
+ --hash=sha256:58ea11e3bf912bd0a36b0519eae2c5b560b3cb972ea756e66b73fb9be460af01 \
+ --hash=sha256:5f681f638f10588fa3c101ee9ae2b63d3734713202ddfcfb6ec6cea0778a29d4 \
+ --hash=sha256:620e8e73bd2595dfff1b4d256a12b67fce90ece3de87b38e1dde46b910f46f4d \
+ --hash=sha256:6335a4c9e8af329cb1ded5ab41a666e1448116161905a94e054f205aa6d263bc \
+ --hash=sha256:66ce47ee1ba91e146149cf40565c3d750ea1be94faf660ca733d8601e0848147 \
+ --hash=sha256:8273fe7932db65d952a43e238318966eab9e49e8dd546550a41df12175cc2be4 \
+ --hash=sha256:8a375c7dbee4734318714a799b6c697223e4bbb57232af37fbfff88fb48a14c6 \
+ --hash=sha256:8b7e069910a66b3bba61df35b5f879e5253ff0821a70375b9daf06444d046fa4 \
+ --hash=sha256:90e202e72a3d5aae673fcc8c9a4267d56b2f532beeb9173361293625fe4d2039 \
+ --hash=sha256:a3b5d1d04a9e0f7b15aa1e647c750631a873b298cdd1255687bb68779fe8eb35 \
+ --hash=sha256:ac6272f87693e78209dc72e84cf9ab58052027733cd0721c55356d3c881791cf \
+ --hash=sha256:ac677b1664c4585c2e014739f6678137c8336815406052349c85898206ec7061 \
+ --hash=sha256:b58edc8f517b66b397abc250ecc737969486703a66eb97e01e6d51291b1a139d \
+ --hash=sha256:cd681460e9142f1249408e5aee6d178c6d89b49e06d44913c8fdfb6defda8d1c \
+ --hash=sha256:d144868a73002e6e2e6fef72333e7b0129cecdd121aa8f1edba7107fd067255d \
+ --hash=sha256:d7c8fac333bf860208fd522a5394369ee3c790d0aa4311f515fcc4b6c5ef8d75 \
+ --hash=sha256:e2e4b66e017253cdbe5a1de49e0eecff3f66cd72bcb1229d7db6e6b1832c0443 \
+ --hash=sha256:ec1b95986430e789c076610aea704db20874f0781b8624f648ca9fb6ef67c6e1 \
+ --hash=sha256:ee68bc8735ae5058695b76d3bb2aee1d137c052a11c8303f1e966aa23b72b65b \
+ --hash=sha256:ffe5f793db59e2f1d45ec35a1cf51404fdd69df9f6952a0c87c3060af4c00e32
+ # via marshmallow
+bandit==1.8.6 \
+ --hash=sha256:3348e934d736fcdb68b6aa4030487097e23a501adf3e7827b63658df464dddd0 \
+ --hash=sha256:dbfe9c25fc6961c2078593de55fd19f2559f9e45b99f1272341f5b95dea4e56b
+black==25.1.0 \
+ --hash=sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171 \
+ --hash=sha256:055e59b198df7ac0b7efca5ad7ff2516bca343276c466be72eb04a3bcc1f82d7 \
+ --hash=sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da \
+ --hash=sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2 \
+ --hash=sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc \
+ --hash=sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666 \
+ --hash=sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f \
+ --hash=sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b \
+ --hash=sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32 \
+ --hash=sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f \
+ --hash=sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717 \
+ --hash=sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299 \
+ --hash=sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18 \
+ --hash=sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0 \
+ --hash=sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3 \
+ --hash=sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096 \
+ --hash=sha256:db8ea9917d6f8fc62abd90d944920d95e73c83a5ee3383493e35d271aca872e9 \
+ --hash=sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba
+cachetools==6.1.0 \
+ --hash=sha256:1c7bb3cf9193deaf3508b7c5f2a79986c13ea38965c5adcff1f84519cf39163e \
+ --hash=sha256:b4c4f404392848db3ce7aac34950d17be4d864da4b8b66911008e430bc544587
+ # via tox
+certifi==2025.8.3 \
+ --hash=sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407 \
+ --hash=sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5
+ # via requests
+cffi==1.17.1 ; platform_python_implementation != 'PyPy' \
+ --hash=sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8 \
+ --hash=sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2 \
+ --hash=sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15 \
+ --hash=sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36 \
+ --hash=sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824 \
+ --hash=sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17 \
+ --hash=sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf \
+ --hash=sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3 \
+ --hash=sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed \
+ --hash=sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702 \
+ --hash=sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1 \
+ --hash=sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8 \
+ --hash=sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903 \
+ --hash=sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6 \
+ --hash=sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d \
+ --hash=sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e \
+ --hash=sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be \
+ --hash=sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683 \
+ --hash=sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9 \
+ --hash=sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c \
+ --hash=sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4 \
+ --hash=sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655 \
+ --hash=sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67 \
+ --hash=sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65 \
+ --hash=sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41 \
+ --hash=sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6 \
+ --hash=sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401 \
+ --hash=sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6 \
+ --hash=sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3 \
+ --hash=sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93 \
+ --hash=sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4 \
+ --hash=sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c \
+ --hash=sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0 \
+ --hash=sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3 \
+ --hash=sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff \
+ --hash=sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5 \
+ --hash=sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd \
+ --hash=sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f \
+ --hash=sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5 \
+ --hash=sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14 \
+ --hash=sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d \
+ --hash=sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382 \
+ --hash=sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e \
+ --hash=sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a \
+ --hash=sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4 \
+ --hash=sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99 \
+ --hash=sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b
+ # via cryptography
+cfgv==3.4.0 \
+ --hash=sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9 \
+ --hash=sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560
+ # via pre-commit
+chardet==5.2.0 \
+ --hash=sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7 \
+ --hash=sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970
+ # via tox
+charset-normalizer==3.4.3 \
+ --hash=sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91 \
+ --hash=sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0 \
+ --hash=sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154 \
+ --hash=sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601 \
+ --hash=sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884 \
+ --hash=sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07 \
+ --hash=sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64 \
+ --hash=sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe \
+ --hash=sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f \
+ --hash=sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc \
+ --hash=sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa \
+ --hash=sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9 \
+ --hash=sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae \
+ --hash=sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d \
+ --hash=sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92 \
+ --hash=sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31 \
+ --hash=sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c \
+ --hash=sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f \
+ --hash=sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15 \
+ --hash=sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392 \
+ --hash=sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f \
+ --hash=sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8 \
+ --hash=sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491 \
+ --hash=sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0 \
+ --hash=sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0 \
+ --hash=sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f \
+ --hash=sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927 \
+ --hash=sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849 \
+ --hash=sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce \
+ --hash=sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14 \
+ --hash=sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c \
+ --hash=sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c \
+ --hash=sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a \
+ --hash=sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc \
+ --hash=sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096 \
+ --hash=sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14 \
+ --hash=sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30 \
+ --hash=sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b \
+ --hash=sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db \
+ --hash=sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5 \
+ --hash=sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce \
+ --hash=sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669 \
+ --hash=sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0 \
+ --hash=sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018 \
+ --hash=sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93 \
+ --hash=sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe \
+ --hash=sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049 \
+ --hash=sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a \
+ --hash=sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef \
+ --hash=sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2 \
+ --hash=sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16 \
+ --hash=sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f \
+ --hash=sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1 \
+ --hash=sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37 \
+ --hash=sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72 \
+ --hash=sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c \
+ --hash=sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9
+ # via requests
+click==8.2.1 \
+ --hash=sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202 \
+ --hash=sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b
+ # via
+ # black
+ # safety
+ # torch-inference-optimized
+ # typer
+ # uvicorn
+colorama==0.4.6 \
+ --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
+ --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
+ # via
+ # bandit
+ # click
+ # pytest
+ # tox
+ # tqdm
+coloredlogs==15.0.1 \
+ --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \
+ --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0
+ # via onnxruntime
+coverage==7.10.3 \
+ --hash=sha256:03db599f213341e2960430984e04cf35fb179724e052a3ee627a068653cf4a7c \
+ --hash=sha256:07009152f497a0464ffdf2634586787aea0e69ddd023eafb23fc38267db94b84 \
+ --hash=sha256:07790b4b37d56608536f7c1079bd1aa511567ac2966d33d5cec9cf520c50a7c8 \
+ --hash=sha256:08b989a06eb9dfacf96d42b7fb4c9a22bafa370d245dc22fa839f2168c6f9fa1 \
+ --hash=sha256:1007d6a2b3cf197c57105cc1ba390d9ff7f0bee215ced4dea530181e49c65ab4 \
+ --hash=sha256:187ecdcac21f9636d570e419773df7bd2fda2e7fa040f812e7f95d0bddf5f79a \
+ --hash=sha256:18ecc5d1b9a8c570f6c9b808fa9a2b16836b3dd5414a6d467ae942208b095f85 \
+ --hash=sha256:1ae22b97003c74186e034a93e4f946c75fad8c0ce8d92fbbc168b5e15ee2841f \
+ --hash=sha256:1af4461b25fe92889590d438905e1fc79a95680ec2a1ff69a591bb3fdb6c7157 \
+ --hash=sha256:1d4f9ce50b9261ad196dc2b2e9f1fbbee21651b54c3097a25ad783679fd18294 \
+ --hash=sha256:1f4e4d8e75f6fd3c6940ebeed29e3d9d632e1f18f6fb65d33086d99d4d073241 \
+ --hash=sha256:205a95b87ef4eb303b7bc5118b47b6b6604a644bcbdb33c336a41cfc0a08c06a \
+ --hash=sha256:24581ed69f132b6225a31b0228ae4885731cddc966f8a33fe5987288bdbbbd5e \
+ --hash=sha256:24d0c13de473b04920ddd6e5da3c08831b1170b8f3b17461d7429b61cad59ae0 \
+ --hash=sha256:2a90dd4505d3cc68b847ab10c5ee81822a968b5191664e8a0801778fa60459fa \
+ --hash=sha256:2ae8e7c56290b908ee817200c0b65929b8050bc28530b131fe7c6dfee3e7d86b \
+ --hash=sha256:30c601610a9b23807c5e9e2e442054b795953ab85d525c3de1b1b27cebeb2117 \
+ --hash=sha256:3262d19092771c83f3413831d9904b1ccc5f98da5de4ffa4ad67f5b20c7aaf7b \
+ --hash=sha256:3564aae76bce4b96e2345cf53b4c87e938c4985424a9be6a66ee902626edec4c \
+ --hash=sha256:3966bc9a76b09a40dc6063c8b10375e827ea5dfcaffae402dd65953bef4cba54 \
+ --hash=sha256:416a8d74dc0adfd33944ba2f405897bab87b7e9e84a391e09d241956bd953ce1 \
+ --hash=sha256:419d2a0f769f26cb1d05e9ccbc5eab4cb5d70231604d47150867c07822acbdf4 \
+ --hash=sha256:424ea93a323aa0f7f01174308ea78bde885c3089ec1bef7143a6d93c3e24ef64 \
+ --hash=sha256:449c1e2d3a84d18bd204258a897a87bc57380072eb2aded6a5b5226046207b42 \
+ --hash=sha256:46eae7893ba65f53c71284585a262f083ef71594f05ec5c85baf79c402369098 \
+ --hash=sha256:488e9b50dc5d2aa9521053cfa706209e5acf5289e81edc28291a24f4e4488f46 \
+ --hash=sha256:4a50ad2524ee7e4c2a95e60d2b0b83283bdfc745fe82359d567e4f15d3823eb5 \
+ --hash=sha256:4af09c7574d09afbc1ea7da9dcea23665c01f3bc1b1feb061dac135f98ffc53a \
+ --hash=sha256:4dd4564207b160d0d45c36a10bc0a3d12563028e8b48cd6459ea322302a156d7 \
+ --hash=sha256:53808194afdf948c462215e9403cca27a81cf150d2f9b386aee4dab614ae2ffe \
+ --hash=sha256:54e409dd64e5302b2a8fdf44ec1c26f47abd1f45a2dcf67bd161873ee05a59b8 \
+ --hash=sha256:5b3801b79fb2ad61e3c7e2554bab754fc5f105626056980a2b9cf3aef4f13f84 \
+ --hash=sha256:5ca3c9530ee072b7cb6a6ea7b640bcdff0ad3b334ae9687e521e59f79b1d0437 \
+ --hash=sha256:5fb742309766d7e48e9eb4dc34bc95a424707bc6140c0e7d9726e794f11b92a0 \
+ --hash=sha256:669fe0d4e69c575c52148511029b722ba8d26e8a3129840c2ce0522e1452b256 \
+ --hash=sha256:6b1f91cbc78c7112ab84ed2a8defbccd90f888fcae40a97ddd6466b0bec6ae8a \
+ --hash=sha256:6b4e25e0fa335c8aa26e42a52053f3786a61cc7622b4d54ae2dad994aa754fec \
+ --hash=sha256:812ba9250532e4a823b070b0420a36499859542335af3dca8f47fc6aa1a05619 \
+ --hash=sha256:8dd2ba5f0c7e7e8cc418be2f0c14c4d9e3f08b8fb8e4c0f83c2fe87d03eb655e \
+ --hash=sha256:913ceddb4289cbba3a310704a424e3fb7aac2bc0c3a23ea473193cb290cf17d4 \
+ --hash=sha256:992f48bf35b720e174e7fae916d943599f1a66501a2710d06c5f8104e0756ee1 \
+ --hash=sha256:9c8916d44d9e0fe6cdb2227dc6b0edd8bc6c8ef13438bbbf69af7482d9bb9833 \
+ --hash=sha256:9e92fa1f2bd5a57df9d00cf9ce1eb4ef6fccca4ceabec1c984837de55329db34 \
+ --hash=sha256:a181e4c2c896c2ff64c6312db3bda38e9ade2e1aa67f86a5628ae85873786cea \
+ --hash=sha256:a374d4e923814e8b72b205ef6b3d3a647bb50e66f3558582eda074c976923613 \
+ --hash=sha256:a83d4f134bab2c7ff758e6bb1541dd72b54ba295ced6a63d93efc2e20cb9b124 \
+ --hash=sha256:b0bac054d45af7cd938834b43a9878b36ea92781bcb009eab040a5b09e9927e3 \
+ --hash=sha256:b0dc69c60224cda33d384572da945759756e3f06b9cdac27f302f53961e63160 \
+ --hash=sha256:b6df359e59fa243c9925ae6507e27f29c46698359f45e568fd51b9315dbbe587 \
+ --hash=sha256:b96524d6e4a3ce6a75c56bb15dbd08023b0ae2289c254e15b9fbdddf0c577416 \
+ --hash=sha256:b99e87304ffe0eb97c5308447328a584258951853807afdc58b16143a530518a \
+ --hash=sha256:bce8b8180912914032785850d8f3aacb25ec1810f5f54afc4a8b114e7a9b55de \
+ --hash=sha256:bd8df1f83c0703fa3ca781b02d36f9ec67ad9cb725b18d486405924f5e4270bd \
+ --hash=sha256:bdb558a1d97345bde3a9f4d3e8d11c9e5611f748646e9bb61d7d612a796671b5 \
+ --hash=sha256:c112f04e075d3495fa3ed2200f71317da99608cbb2e9345bdb6de8819fc30571 \
+ --hash=sha256:c1e2e927ab3eadd7c244023927d646e4c15c65bb2ac7ae3c3e9537c013700d21 \
+ --hash=sha256:c2079d8cdd6f7373d628e14b3357f24d1db02c9dc22e6a007418ca7a2be0435a \
+ --hash=sha256:c5595fc4ad6a39312c786ec3326d7322d0cf10e3ac6a6df70809910026d67cfb \
+ --hash=sha256:c65e2a5b32fbe1e499f1036efa6eb9cb4ea2bf6f7168d0e7a5852f3024f471b1 \
+ --hash=sha256:c9e6331a8f09cb1fc8bda032752af03c366870b48cce908875ba2620d20d0ad4 \
+ --hash=sha256:cc0ee4b2ccd42cab7ee6be46d8a67d230cb33a0a7cd47a58b587a7063b6c6b0e \
+ --hash=sha256:ce01048199a91f07f96ca3074b0c14021f4fe7ffd29a3e6a188ac60a5c3a4af8 \
+ --hash=sha256:d48d2cb07d50f12f4f18d2bb75d9d19e3506c26d96fffabf56d22936e5ed8f7c \
+ --hash=sha256:d52989685ff5bf909c430e6d7f6550937bc6d6f3e6ecb303c97a86100efd4596 \
+ --hash=sha256:d7c3d02c2866deb217dce664c71787f4b25420ea3eaf87056f44fb364a3528f5 \
+ --hash=sha256:dabe662312a97958e932dee056f2659051d822552c0b866823e8ba1c2fe64770 \
+ --hash=sha256:daeefff05993e5e8c6e7499a8508e7bd94502b6b9a9159c84fd1fe6bce3151cb \
+ --hash=sha256:dec0d9bc15ee305e09fe2cd1911d3f0371262d3cfdae05d79515d8cb712b4869 \
+ --hash=sha256:e79367ef2cd9166acedcbf136a458dfe9a4a2dd4d1ee95738fb2ee581c56f667 \
+ --hash=sha256:eb329f1046888a36b1dc35504d3029e1dd5afe2196d94315d18c45ee380f67d5 \
+ --hash=sha256:ebc8791d346410d096818788877d675ca55c91db87d60e8f477bd41c6970ffc6 \
+ --hash=sha256:ec151569ddfccbf71bac8c422dce15e176167385a00cd86e887f9a80035ce8a5 \
+ --hash=sha256:ee221cf244757cdc2ac882e3062ab414b8464ad9c884c21e878517ea64b3fa26 \
+ --hash=sha256:f2ff2e2afdf0d51b9b8301e542d9c21a8d084fd23d4c8ea2b3a1b3c96f5f7397 \
+ --hash=sha256:f35580f19f297455f44afcd773c9c7a058e52eb6eb170aa31222e635f2e38b87 \
+ --hash=sha256:f4d1b837d1abf72187a61645dbf799e0d7705aa9232924946e1f57eb09a3bf00 \
+ --hash=sha256:f5983c132a62d93d71c9ef896a0b9bf6e6828d8d2ea32611f58684fba60bba35 \
+ --hash=sha256:fe72cbdd12d9e0f4aca873fa6d755e103888a7f9085e4a62d282d9d5b9f7928c
+ # via pytest-cov
+cryptography==45.0.6 \
+ --hash=sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5 \
+ --hash=sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74 \
+ --hash=sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394 \
+ --hash=sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301 \
+ --hash=sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08 \
+ --hash=sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3 \
+ --hash=sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b \
+ --hash=sha256:2384f2ab18d9be88a6e4f8972923405e2dbb8d3e16c6b43f15ca491d7831bd18 \
+ --hash=sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402 \
+ --hash=sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3 \
+ --hash=sha256:31a2b9a10530a1cb04ffd6aa1cd4d3be9ed49f7d77a4dafe198f3b382f41545c \
+ --hash=sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0 \
+ --hash=sha256:3b5bf5267e98661b9b888a9250d05b063220dfa917a8203744454573c7eb79db \
+ --hash=sha256:3de77e4df42ac8d4e4d6cdb342d989803ad37707cf8f3fbf7b088c9cbdd46427 \
+ --hash=sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f \
+ --hash=sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3 \
+ --hash=sha256:599c8d7df950aa68baa7e98f7b73f4f414c9f02d0e8104a30c0182a07732638b \
+ --hash=sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9 \
+ --hash=sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5 \
+ --hash=sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719 \
+ --hash=sha256:629127cfdcdc6806dfe234734d7cb8ac54edaf572148274fa377a7d3405b0043 \
+ --hash=sha256:705bb7c7ecc3d79a50f236adda12ca331c8e7ecfbea51edd931ce5a7a7c4f012 \
+ --hash=sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02 \
+ --hash=sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2 \
+ --hash=sha256:826b46dae41a1155a0c0e66fafba43d0ede1dc16570b95e40c4d83bfcf0a451d \
+ --hash=sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec \
+ --hash=sha256:cc4d66f5dc4dc37b89cfef1bd5044387f7a1f6f0abb490815628501909332d5d \
+ --hash=sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159 \
+ --hash=sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453 \
+ --hash=sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf \
+ --hash=sha256:e5b3dda1b00fb41da3af4c5ef3f922a200e33ee5ba0f0bc9ecf0b0c173958385 \
+ --hash=sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9 \
+ --hash=sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016 \
+ --hash=sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05 \
+ --hash=sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42 \
+ --hash=sha256:f68f833a9d445cc49f01097d95c83a850795921b3f7cc6488731e69bde3288da \
+ --hash=sha256:fc022c1fa5acff6def2fc6d7819bbbd31ccddfe67d075331a65d9cfb28a20983
+ # via authlib
+distlib==0.4.0 \
+ --hash=sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16 \
+ --hash=sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d
+ # via virtualenv
+dparse==0.6.4 \
+ --hash=sha256:90b29c39e3edc36c6284c82c4132648eaf28a01863eb3c231c2512196132201a \
+ --hash=sha256:fbab4d50d54d0e739fbb4dedfc3d92771003a5b9aa8545ca7a7045e3b174af57
+ # via
+ # safety
+ # safety-schemas
+exceptiongroup==1.3.0 ; python_full_version < '3.11' \
+ --hash=sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10 \
+ --hash=sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88
+ # via
+ # anyio
+ # pytest
+execnet==2.1.1 \
+ --hash=sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc \
+ --hash=sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3
+ # via pytest-xdist
+fastapi==0.116.1 \
+ --hash=sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565 \
+ --hash=sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143
+ # via torch-inference-optimized
+filelock==3.12.4 \
+ --hash=sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4 \
+ --hash=sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd
+ # via
+ # huggingface-hub
+ # safety
+ # torch
+ # tox
+ # virtualenv
+flatbuffers==25.2.10 \
+ --hash=sha256:97e451377a41262f8d9bd4295cc836133415cc03d8cb966410a4af92eb00d26e \
+ --hash=sha256:ebba5f4d5ea615af3f7fd70fc310636fbb2bbd1f566ac0a23d98dd412de50051
+ # via onnxruntime
+frozenlist==1.7.0 \
+ --hash=sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f \
+ --hash=sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b \
+ --hash=sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949 \
+ --hash=sha256:1073557c941395fdfcfac13eb2456cb8aad89f9de27bae29fabca8e563b12615 \
+ --hash=sha256:15900082e886edb37480335d9d518cec978afc69ccbc30bd18610b7c1b22a718 \
+ --hash=sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df \
+ --hash=sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf \
+ --hash=sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5 \
+ --hash=sha256:1ed8d2fa095aae4bdc7fdd80351009a48d286635edffee66bf865e37a9125c50 \
+ --hash=sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb \
+ --hash=sha256:21884e23cffabb157a9dd7e353779077bf5b8f9a58e9b262c6caad2ef5f80a56 \
+ --hash=sha256:24c34bea555fe42d9f928ba0a740c553088500377448febecaa82cc3e88aa1fa \
+ --hash=sha256:284d233a8953d7b24f9159b8a3496fc1ddc00f4db99c324bd5fb5f22d8698ea7 \
+ --hash=sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43 \
+ --hash=sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f \
+ --hash=sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c \
+ --hash=sha256:34a69a85e34ff37791e94542065c8416c1afbf820b68f720452f636d5fb990cd \
+ --hash=sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c \
+ --hash=sha256:3789ebc19cb811163e70fe2bd354cea097254ce6e707ae42e56f45e31e96cb8e \
+ --hash=sha256:387cbfdcde2f2353f19c2f66bbb52406d06ed77519ac7ee21be0232147c2592d \
+ --hash=sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81 \
+ --hash=sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e \
+ --hash=sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657 \
+ --hash=sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478 \
+ --hash=sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2 \
+ --hash=sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca \
+ --hash=sha256:400ddd24ab4e55014bba442d917203c73b2846391dd42ca5e38ff52bb18c3c5e \
+ --hash=sha256:41be2964bd4b15bf575e5daee5a5ce7ed3115320fb3c2b71fca05582ffa4dc9e \
+ --hash=sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3 \
+ --hash=sha256:45a6f2fdbd10e074e8814eb98b05292f27bad7d1883afbe009d96abdcf3bc898 \
+ --hash=sha256:46d84d49e00c9429238a7ce02dc0be8f6d7cd0cd405abd1bebdc991bf27c15bd \
+ --hash=sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca \
+ --hash=sha256:4a646531fa8d82c87fe4bb2e596f23173caec9185bfbca5d583b4ccfb95183e2 \
+ --hash=sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104 \
+ --hash=sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba \
+ --hash=sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a \
+ --hash=sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1 \
+ --hash=sha256:61d1a5baeaac6c0798ff6edfaeaa00e0e412d49946c53fae8d4b8e8b3566c4ae \
+ --hash=sha256:69cac419ac6a6baad202c85aaf467b65ac860ac2e7f2ac1686dc40dbb52f6577 \
+ --hash=sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60 \
+ --hash=sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee \
+ --hash=sha256:6eb93efb8101ef39d32d50bce242c84bcbddb4f7e9febfa7b524532a239b4464 \
+ --hash=sha256:716a9973a2cc963160394f701964fe25012600f3d311f60c790400b00e568b61 \
+ --hash=sha256:72c1b0fe8fe451b34f12dce46445ddf14bd2a5bcad7e324987194dc8e3a74c86 \
+ --hash=sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01 \
+ --hash=sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb \
+ --hash=sha256:79b2ffbba483f4ed36a0f236ccb85fbb16e670c9238313709638167670ba235f \
+ --hash=sha256:7edf5c043c062462f09b6820de9854bf28cc6cc5b6714b383149745e287181a8 \
+ --hash=sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d \
+ --hash=sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00 \
+ --hash=sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b \
+ --hash=sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b \
+ --hash=sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146 \
+ --hash=sha256:960d67d0611f4c87da7e2ae2eacf7ea81a5be967861e0c63cf205215afbfac59 \
+ --hash=sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08 \
+ --hash=sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e \
+ --hash=sha256:9b35db7ce1cd71d36ba24f80f0c9e7cff73a28d7a74e91fe83e23d27c7828750 \
+ --hash=sha256:a0fd1bad056a3600047fb9462cff4c5322cebc59ebf5d0a3725e0ee78955001d \
+ --hash=sha256:a26f205c9ca5829cbf82bb2a84b5c36f7184c4316617d7ef1b271a56720d6b30 \
+ --hash=sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3 \
+ --hash=sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d \
+ --hash=sha256:aa51e147a66b2d74de1e6e2cf5921890de6b0f4820b257465101d7f37b49fb5a \
+ --hash=sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8 \
+ --hash=sha256:ac64b6478722eeb7a3313d494f8342ef3478dff539d17002f849101b212ef97c \
+ --hash=sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1 \
+ --hash=sha256:af369aa35ee34f132fcfad5be45fbfcde0e3a5f6a1ec0712857f286b7d20cca9 \
+ --hash=sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e \
+ --hash=sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384 \
+ --hash=sha256:bcacfad3185a623fa11ea0e0634aac7b691aa925d50a440f39b458e41c561d98 \
+ --hash=sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb \
+ --hash=sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4 \
+ --hash=sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65 \
+ --hash=sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08 \
+ --hash=sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43 \
+ --hash=sha256:cc4df77d638aa2ed703b878dd093725b72a824c3c546c076e8fdf276f78ee84a \
+ --hash=sha256:ce48b2fece5aeb45265bb7a58259f45027db0abff478e3077e12b05b17fb9da7 \
+ --hash=sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d \
+ --hash=sha256:d50ac7627b3a1bd2dcef6f9da89a772694ec04d9a61b66cf87f7d9446b4a0c31 \
+ --hash=sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d \
+ --hash=sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e \
+ --hash=sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025 \
+ --hash=sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee \
+ --hash=sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1 \
+ --hash=sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74 \
+ --hash=sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b \
+ --hash=sha256:f89f65d85774f1797239693cef07ad4c97fdd0639544bad9ac4b869782eb1981 \
+ --hash=sha256:fe2365ae915a1fafd982c146754e1de6ab3478def8a59c86e1f7242d794f97d5
+ # via
+ # aiohttp
+ # aiosignal
+fsspec==2025.7.0 \
+ --hash=sha256:786120687ffa54b8283d942929540d8bc5ccfa820deb555a2b5d0ed2b737bf58 \
+ --hash=sha256:8b012e39f63c7d5f10474de957f3ab793b47b45ae7d39f2fb735f8bbe25c0e21
+ # via
+ # huggingface-hub
+ # torch
+h11==0.16.0 \
+ --hash=sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1 \
+ --hash=sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86
+ # via uvicorn
+hf-xet==1.1.7 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64' \
+ --hash=sha256:18b61bbae92d56ae731b92087c44efcac216071182c603fc535f8e29ec4b09b8 \
+ --hash=sha256:20cec8db4561338824a3b5f8c19774055b04a8df7fff0cb1ff2cb1a0c1607b80 \
+ --hash=sha256:2e356da7d284479ae0f1dea3cf5a2f74fdf925d6dca84ac4341930d892c7cb34 \
+ --hash=sha256:60dae4b44d520819e54e216a2505685248ec0adbdb2dd4848b17aa85a0375cde \
+ --hash=sha256:6efaaf1a5a9fc3a501d3e71e88a6bfebc69ee3a716d0e713a931c8b8d920038f \
+ --hash=sha256:713f2bff61b252f8523739969f247aa354ad8e6d869b8281e174e2ea1bb8d604 \
+ --hash=sha256:751571540f9c1fbad9afcf222a5fb96daf2384bf821317b8bfb0c59d86078513 \
+ --hash=sha256:b109f4c11e01c057fc82004c9e51e6cdfe2cb230637644ade40c599739067b2e
+ # via huggingface-hub
+huggingface-hub==0.34.4 \
+ --hash=sha256:9b365d781739c93ff90c359844221beef048403f1bc1f1c123c191257c3c890a \
+ --hash=sha256:a4228daa6fb001be3f4f4bdaf9a0db00e1739235702848df00885c9b5742c85c
+ # via torch-inference-optimized
+humanfriendly==10.0 \
+ --hash=sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477 \
+ --hash=sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc
+ # via coloredlogs
+identify==2.6.13 \
+ --hash=sha256:60381139b3ae39447482ecc406944190f690d4a2997f2584062089848361b33b \
+ --hash=sha256:da8d6c828e773620e13bfa86ea601c5a5310ba4bcd65edf378198b56a1f9fb32
+ # via pre-commit
+idna==3.10 \
+ --hash=sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9 \
+ --hash=sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3
+ # via
+ # anyio
+ # requests
+ # yarl
+iniconfig==2.1.0 \
+ --hash=sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7 \
+ --hash=sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760
+ # via pytest
+jinja2==3.1.6 \
+ --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \
+ --hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67
+ # via
+ # pytest-html
+ # safety
+ # torch
+markdown-it-py==3.0.0 \
+ --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \
+ --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb
+ # via rich
+markupsafe==3.0.2 \
+ --hash=sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4 \
+ --hash=sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30 \
+ --hash=sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9 \
+ --hash=sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396 \
+ --hash=sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028 \
+ --hash=sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca \
+ --hash=sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557 \
+ --hash=sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832 \
+ --hash=sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b \
+ --hash=sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579 \
+ --hash=sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a \
+ --hash=sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c \
+ --hash=sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c \
+ --hash=sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22 \
+ --hash=sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094 \
+ --hash=sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb \
+ --hash=sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e \
+ --hash=sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5 \
+ --hash=sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a \
+ --hash=sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d \
+ --hash=sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b \
+ --hash=sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8 \
+ --hash=sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225 \
+ --hash=sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c \
+ --hash=sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87 \
+ --hash=sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d \
+ --hash=sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93 \
+ --hash=sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf \
+ --hash=sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158 \
+ --hash=sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84 \
+ --hash=sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb \
+ --hash=sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48 \
+ --hash=sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171 \
+ --hash=sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c \
+ --hash=sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6 \
+ --hash=sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd \
+ --hash=sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d \
+ --hash=sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1 \
+ --hash=sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d \
+ --hash=sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca \
+ --hash=sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a \
+ --hash=sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe \
+ --hash=sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798 \
+ --hash=sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c \
+ --hash=sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8 \
+ --hash=sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f \
+ --hash=sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f \
+ --hash=sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0 \
+ --hash=sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79 \
+ --hash=sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430 \
+ --hash=sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50
+ # via jinja2
+marshmallow==4.0.0 \
+ --hash=sha256:3b6e80aac299a7935cfb97ed01d1854fb90b5079430969af92118ea1b12a8d55 \
+ --hash=sha256:e7b0528337e9990fd64950f8a6b3a1baabed09ad17a0dfb844d701151f92d203
+ # via safety
+mdurl==0.1.2 \
+ --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \
+ --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba
+ # via markdown-it-py
+mpmath==1.3.0 \
+ --hash=sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f \
+ --hash=sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c
+ # via sympy
+multidict==6.6.3 \
+ --hash=sha256:02fd8f32d403a6ff13864b0851f1f523d4c988051eea0471d4f1fd8010f11134 \
+ --hash=sha256:04cbcce84f63b9af41bad04a54d4cc4e60e90c35b9e6ccb130be2d75b71f8c17 \
+ --hash=sha256:056bebbeda16b2e38642d75e9e5310c484b7c24e3841dc0fb943206a72ec89d6 \
+ --hash=sha256:05db2f66c9addb10cfa226e1acb363450fab2ff8a6df73c622fefe2f5af6d4e7 \
+ --hash=sha256:0b9e59946b49dafaf990fd9c17ceafa62976e8471a14952163d10a7a630413a9 \
+ --hash=sha256:0db58da8eafb514db832a1b44f8fa7906fdd102f7d982025f816a93ba45e3dcb \
+ --hash=sha256:0f1130b896ecb52d2a1e615260f3ea2af55fa7dc3d7c3003ba0c3121a759b18b \
+ --hash=sha256:10bea2ee839a759ee368b5a6e47787f399b41e70cf0c20d90dfaf4158dfb4e55 \
+ --hash=sha256:12f4581d2930840295c461764b9a65732ec01250b46c6b2c510d7ee68872b140 \
+ --hash=sha256:135631cb6c58eac37d7ac0df380294fecdc026b28837fa07c02e459c7fb9c54e \
+ --hash=sha256:14117a41c8fdb3ee19c743b1c027da0736fdb79584d61a766da53d399b71176c \
+ --hash=sha256:159ca68bfd284a8860f8d8112cf0521113bffd9c17568579e4d13d1f1dc76b65 \
+ --hash=sha256:18f4eba0cbac3546b8ae31e0bbc55b02c801ae3cbaf80c247fcdd89b456ff58c \
+ --hash=sha256:1bf99b4daf908c73856bd87ee0a2499c3c9a3d19bb04b9c6025e66af3fd07462 \
+ --hash=sha256:208b9b9757060b9faa6f11ab4bc52846e4f3c2fb8b14d5680c8aac80af3dc751 \
+ --hash=sha256:20c5a0c3c13a15fd5ea86c42311859f970070e4e24de5a550e99d7c271d76318 \
+ --hash=sha256:2334cfb0fa9549d6ce2c21af2bfbcd3ac4ec3646b1b1581c88e3e2b1779ec92b \
+ --hash=sha256:274d416b0df887aef98f19f21578653982cfb8a05b4e187d4a17103322eeaf8f \
+ --hash=sha256:2e4cc8d848cd4fe1cdee28c13ea79ab0ed37fc2e89dd77bac86a2e7959a8c3bc \
+ --hash=sha256:346055630a2df2115cd23ae271910b4cae40f4e336773550dca4889b12916e75 \
+ --hash=sha256:3713303e4a6663c6d01d648a68f2848701001f3390a030edaaf3fc949c90bf7c \
+ --hash=sha256:3893a0d7d28a7fe6ca7a1f760593bc13038d1d35daf52199d431b61d2660602b \
+ --hash=sha256:42ca5aa9329a63be8dc49040f63817d1ac980e02eeddba763a9ae5b4027b9c9c \
+ --hash=sha256:43571f785b86afd02b3855c5ac8e86ec921b760298d6f82ff2a61daf5a35330b \
+ --hash=sha256:4ef421045f13879e21c994b36e728d8e7d126c91a64b9185810ab51d474f27e7 \
+ --hash=sha256:500b84f51654fdc3944e936f2922114349bf8fdcac77c3092b03449f0e5bc2b3 \
+ --hash=sha256:531e331a2ee53543ab32b16334e2deb26f4e6b9b28e41f8e0c87e99a6c8e2d69 \
+ --hash=sha256:540d3c06d48507357a7d57721e5094b4f7093399a0106c211f33540fdc374d55 \
+ --hash=sha256:555ff55a359302b79de97e0468e9ee80637b0de1fce77721639f7cd9440b3a10 \
+ --hash=sha256:5633a82fba8e841bc5c5c06b16e21529573cd654f67fd833650a215520a6210e \
+ --hash=sha256:5bd8d6f793a787153956cd35e24f60485bf0651c238e207b9a54f7458b16d539 \
+ --hash=sha256:639ecc9fe7cd73f2495f62c213e964843826f44505a3e5d82805aa85cac6f89e \
+ --hash=sha256:67c92ed673049dec52d7ed39f8cf9ebbadf5032c774058b4406d18c8f8fe7063 \
+ --hash=sha256:68e9e12ed00e2089725669bdc88602b0b6f8d23c0c95e52b95f0bc69f7fe9b55 \
+ --hash=sha256:6c1e61bb4f80895c081790b6b09fa49e13566df8fbff817da3f85b3a8192e36b \
+ --hash=sha256:70b72e749a4f6e7ed8fb334fa8d8496384840319512746a5f42fa0aec79f4d61 \
+ --hash=sha256:70d974eaaa37211390cd02ef93b7e938de564bbffa866f0b08d07e5e65da783d \
+ --hash=sha256:712b348f7f449948e0a6c4564a21c7db965af900973a67db432d724619b3c680 \
+ --hash=sha256:72d8815f2cd3cf3df0f83cac3f3ef801d908b2d90409ae28102e0553af85545a \
+ --hash=sha256:7394888236621f61dcdd25189b2768ae5cc280f041029a5bcf1122ac63df79f9 \
+ --hash=sha256:73ab034fb8d58ff85c2bcbadc470efc3fafeea8affcf8722855fb94557f14cc5 \
+ --hash=sha256:766a4a5996f54361d8d5a9050140aa5362fe48ce51c755a50c0bc3706460c430 \
+ --hash=sha256:769841d70ca8bdd140a715746199fc6473414bd02efd678d75681d2d6a8986c5 \
+ --hash=sha256:798a9eb12dab0a6c2e29c1de6f3468af5cb2da6053a20dfa3344907eed0937cc \
+ --hash=sha256:7af039820cfd00effec86bda5d8debef711a3e86a1d3772e85bea0f243a4bd65 \
+ --hash=sha256:7c6df517cf177da5d47ab15407143a89cd1a23f8b335f3a28d57e8b0a3dbb884 \
+ --hash=sha256:81ef2f64593aba09c5212a3d0f8c906a0d38d710a011f2f42759704d4557d3f2 \
+ --hash=sha256:877443eaaabcd0b74ff32ebeed6f6176c71850feb7d6a1d2db65945256ea535c \
+ --hash=sha256:8db10f29c7541fc5da4defd8cd697e1ca429db743fa716325f236079b96f775a \
+ --hash=sha256:8df25594989aebff8a130f7899fa03cbfcc5d2b5f4a461cf2518236fe6f15961 \
+ --hash=sha256:900eb9f9da25ada070f8ee4a23f884e0ee66fe4e1a38c3af644256a508ad81ca \
+ --hash=sha256:934796c81ea996e61914ba58064920d6cad5d99140ac3167901eb932150e2e56 \
+ --hash=sha256:94c47ea3ade005b5976789baaed66d4de4480d0a0bf31cef6edaa41c1e7b56a6 \
+ --hash=sha256:9c19cea2a690f04247d43f366d03e4eb110a0dc4cd1bbeee4d445435428ed35b \
+ --hash=sha256:9e236a7094b9c4c1b7585f6b9cca34b9d833cf079f7e4c49e6a4a6ec9bfdc68f \
+ --hash=sha256:9e864486ef4ab07db5e9cb997bad2b681514158d6954dd1958dfb163b83d53e6 \
+ --hash=sha256:9ed948328aec2072bc00f05d961ceadfd3e9bfc2966c1319aeaf7b7c21219183 \
+ --hash=sha256:9f5b28c074c76afc3e4c610c488e3493976fe0e596dd3db6c8ddfbb0134dcac5 \
+ --hash=sha256:9f97e181f344a0ef3881b573d31de8542cc0dbc559ec68c8f8b5ce2c2e91646d \
+ --hash=sha256:a2be5b7b35271f7fff1397204ba6708365e3d773579fe2a30625e16c4b4ce817 \
+ --hash=sha256:ab0a34a007704c625e25a9116c6770b4d3617a071c8a7c30cd338dfbadfe6485 \
+ --hash=sha256:acf6b97bd0884891af6a8b43d0f586ab2fcf8e717cbd47ab4bdddc09e20652d8 \
+ --hash=sha256:b24576f208793ebae00280c59927c3b7c2a3b1655e443a25f753c4611bc1c373 \
+ --hash=sha256:b8fee016722550a2276ca2cb5bb624480e0ed2bd49125b2b73b7010b9090e888 \
+ --hash=sha256:b9cbc60010de3562545fa198bfc6d3825df430ea96d2cc509c39bd71e2e7d648 \
+ --hash=sha256:b9fe5a0e57c6dbd0e2ce81ca66272282c32cd11d31658ee9553849d91289e1c1 \
+ --hash=sha256:bc7f6fbc61b1c16050a389c630da0b32fc6d4a3d191394ab78972bf5edc568c2 \
+ --hash=sha256:bd0578596e3a835ef451784053cfd327d607fc39ea1a14812139339a18a0dbc3 \
+ --hash=sha256:bf9bd1fd5eec01494e0f2e8e446a74a85d5e49afb63d75a9934e4a5423dba21d \
+ --hash=sha256:c60b401f192e79caec61f166da9c924e9f8bc65548d4246842df91651e83d600 \
+ --hash=sha256:cdf22e4db76d323bcdc733514bf732e9fb349707c98d341d40ebcc6e9318ef3d \
+ --hash=sha256:ce8b7693da41a3c4fde5871c738a81490cea5496c671d74374c8ab889e1834fb \
+ --hash=sha256:d4e47d8faffaae822fb5cba20937c048d4f734f43572e7079298a6c39fb172cb \
+ --hash=sha256:dbc7cf464cc6d67e83e136c9f55726da3a30176f020a36ead246eceed87f1cd8 \
+ --hash=sha256:dd7793bab517e706c9ed9d7310b06c8672fd0aeee5781bfad612f56b8e0f7d14 \
+ --hash=sha256:e098c17856a8c9ade81b4810888c5ad1914099657226283cab3062c0540b0643 \
+ --hash=sha256:e0cb0ab69915c55627c933f0b555a943d98ba71b4d1c57bc0d0a66e2567c7471 \
+ --hash=sha256:e252017a817fad7ce05cafbe5711ed40faeb580e63b16755a3a24e66fa1d87c0 \
+ --hash=sha256:e2db616467070d0533832d204c54eea6836a5e628f2cb1e6dfd8cd6ba7277cb7 \
+ --hash=sha256:e4e15d2138ee2694e038e33b7c3da70e6b0ad8868b9f8094a72e1414aeda9c1a \
+ --hash=sha256:e5511cb35f5c50a2db21047c875eb42f308c5583edf96bd8ebf7d770a9d68f6d \
+ --hash=sha256:e5e8523bb12d7623cd8300dbd91b9e439a46a028cd078ca695eb66ba31adee3c \
+ --hash=sha256:e5f481cccb3c5c5e5de5d00b5141dc589c1047e60d07e85bbd7dea3d4580d63f \
+ --hash=sha256:e924fb978615a5e33ff644cc42e6aa241effcf4f3322c09d4f8cebde95aff5f8 \
+ --hash=sha256:e93089c1570a4ad54c3714a12c2cef549dc9d58e97bcded193d928649cab78e9 \
+ --hash=sha256:e995a34c3d44ab511bfc11aa26869b9d66c2d8c799fa0e74b28a473a692532d6 \
+ --hash=sha256:ef43b5dd842382329e4797c46f10748d8c2b6e0614f46b4afe4aee9ac33159df \
+ --hash=sha256:ef58340cc896219e4e653dade08fea5c55c6df41bcc68122e3be3e9d873d9a7b \
+ --hash=sha256:f114d8478733ca7388e7c7e0ab34b72547476b97009d643644ac33d4d3fe1821 \
+ --hash=sha256:f3aa090106b1543f3f87b2041eef3c156c8da2aed90c63a2fbed62d875c49c37 \
+ --hash=sha256:f3fc723ab8a5c5ed6c50418e9bfcd8e6dceba6c271cee6728a10a4ed8561520c \
+ --hash=sha256:fc9dc435ec8699e7b602b94fe0cd4703e69273a01cbc34409af29e7820f777f1
+ # via
+ # aiohttp
+ # yarl
+mypy==1.17.1 \
+ --hash=sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341 \
+ --hash=sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5 \
+ --hash=sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849 \
+ --hash=sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733 \
+ --hash=sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81 \
+ --hash=sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403 \
+ --hash=sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6 \
+ --hash=sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01 \
+ --hash=sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91 \
+ --hash=sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972 \
+ --hash=sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd \
+ --hash=sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0 \
+ --hash=sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19 \
+ --hash=sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb \
+ --hash=sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd \
+ --hash=sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7 \
+ --hash=sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056 \
+ --hash=sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7 \
+ --hash=sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a \
+ --hash=sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed \
+ --hash=sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94 \
+ --hash=sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9 \
+ --hash=sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58 \
+ --hash=sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5 \
+ --hash=sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a \
+ --hash=sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df \
+ --hash=sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb \
+ --hash=sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390 \
+ --hash=sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b \
+ --hash=sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b \
+ --hash=sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14 \
+ --hash=sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b
+mypy-extensions==1.1.0 \
+ --hash=sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505 \
+ --hash=sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558
+ # via
+ # black
+ # mypy
+networkx==3.4.2 ; python_full_version < '3.11' \
+ --hash=sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1 \
+ --hash=sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f
+ # via torch
+networkx==3.5 ; python_full_version >= '3.11' \
+ --hash=sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec \
+ --hash=sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037
+ # via torch
+nodeenv==1.9.1 \
+ --hash=sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f \
+ --hash=sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9
+ # via pre-commit
+numpy==2.2.6 ; python_full_version < '3.11' \
+ --hash=sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff \
+ --hash=sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47 \
+ --hash=sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84 \
+ --hash=sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d \
+ --hash=sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6 \
+ --hash=sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f \
+ --hash=sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b \
+ --hash=sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49 \
+ --hash=sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163 \
+ --hash=sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571 \
+ --hash=sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42 \
+ --hash=sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff \
+ --hash=sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491 \
+ --hash=sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4 \
+ --hash=sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566 \
+ --hash=sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf \
+ --hash=sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40 \
+ --hash=sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd \
+ --hash=sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06 \
+ --hash=sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282 \
+ --hash=sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680 \
+ --hash=sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db \
+ --hash=sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3 \
+ --hash=sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90 \
+ --hash=sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1 \
+ --hash=sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289 \
+ --hash=sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab \
+ --hash=sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c \
+ --hash=sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d \
+ --hash=sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb \
+ --hash=sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d \
+ --hash=sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a \
+ --hash=sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf \
+ --hash=sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1 \
+ --hash=sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2 \
+ --hash=sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a \
+ --hash=sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543 \
+ --hash=sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00 \
+ --hash=sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c \
+ --hash=sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f \
+ --hash=sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd \
+ --hash=sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868 \
+ --hash=sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303 \
+ --hash=sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83 \
+ --hash=sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3 \
+ --hash=sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d \
+ --hash=sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87 \
+ --hash=sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa \
+ --hash=sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f \
+ --hash=sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae \
+ --hash=sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda \
+ --hash=sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915 \
+ --hash=sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249 \
+ --hash=sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de \
+ --hash=sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8
+ # via
+ # onnx
+ # onnxruntime
+ # opencv-python
+ # torch-inference-optimized
+ # torchvision
+numpy==2.3.2 ; python_full_version >= '3.11' \
+ --hash=sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5 \
+ --hash=sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b \
+ --hash=sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631 \
+ --hash=sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58 \
+ --hash=sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b \
+ --hash=sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc \
+ --hash=sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089 \
+ --hash=sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf \
+ --hash=sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15 \
+ --hash=sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f \
+ --hash=sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3 \
+ --hash=sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170 \
+ --hash=sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910 \
+ --hash=sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91 \
+ --hash=sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45 \
+ --hash=sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c \
+ --hash=sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f \
+ --hash=sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b \
+ --hash=sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89 \
+ --hash=sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a \
+ --hash=sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220 \
+ --hash=sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e \
+ --hash=sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab \
+ --hash=sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2 \
+ --hash=sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b \
+ --hash=sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370 \
+ --hash=sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2 \
+ --hash=sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee \
+ --hash=sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619 \
+ --hash=sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712 \
+ --hash=sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1 \
+ --hash=sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec \
+ --hash=sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a \
+ --hash=sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450 \
+ --hash=sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a \
+ --hash=sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2 \
+ --hash=sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168 \
+ --hash=sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2 \
+ --hash=sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73 \
+ --hash=sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296 \
+ --hash=sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9 \
+ --hash=sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125 \
+ --hash=sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0 \
+ --hash=sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19 \
+ --hash=sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b \
+ --hash=sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f \
+ --hash=sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2 \
+ --hash=sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f \
+ --hash=sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a \
+ --hash=sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6 \
+ --hash=sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286 \
+ --hash=sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981 \
+ --hash=sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f \
+ --hash=sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2 \
+ --hash=sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0 \
+ --hash=sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b \
+ --hash=sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b \
+ --hash=sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56 \
+ --hash=sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5 \
+ --hash=sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3 \
+ --hash=sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8 \
+ --hash=sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0 \
+ --hash=sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036 \
+ --hash=sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6 \
+ --hash=sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8 \
+ --hash=sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48 \
+ --hash=sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07 \
+ --hash=sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b \
+ --hash=sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b \
+ --hash=sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d \
+ --hash=sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0 \
+ --hash=sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097 \
+ --hash=sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be \
+ --hash=sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5
+ # via
+ # onnx
+ # onnxruntime
+ # opencv-python
+ # torch-inference-optimized
+ # torchvision
+nvidia-cublas-cu12==12.8.4.1 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142
+ # via
+ # nvidia-cudnn-cu12
+ # nvidia-cusolver-cu12
+ # torch
+nvidia-cuda-cupti-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182
+ # via torch
+nvidia-cuda-nvrtc-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994
+ # via torch
+nvidia-cuda-runtime-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d \
+ --hash=sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90 \
+ --hash=sha256:c0c6027f01505bfed6c3b21ec546f69c687689aad5f1a377554bc6ca4aa993a8
+ # via torch
+nvidia-cudnn-cu12==9.10.2.21 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8
+ # via torch
+nvidia-cufft-cu12==11.3.3.83 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74
+ # via torch
+nvidia-cufile-cu12==1.13.1.3 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc
+ # via torch
+nvidia-curand-cu12==10.3.9.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9
+ # via torch
+nvidia-cusolver-cu12==11.7.3.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450
+ # via torch
+nvidia-cusparse-cu12==12.5.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b
+ # via
+ # nvidia-cusolver-cu12
+ # torch
+nvidia-cusparselt-cu12==0.7.1 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623
+ # via torch
+nvidia-nccl-cu12==2.27.3 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:adf27ccf4238253e0b826bce3ff5fa532d65fc42322c8bfdfaf28024c0fbe039
+ # via torch
+nvidia-nvjitlink-cu12==12.8.93 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88
+ # via
+ # nvidia-cufft-cu12
+ # nvidia-cusolver-cu12
+ # nvidia-cusparse-cu12
+ # torch
+nvidia-nvtx-cu12==12.8.90 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f
+ # via torch
+onnx==1.18.0 \
+ --hash=sha256:030d9f5f878c5f4c0ff70a4545b90d7812cd6bfe511de2f3e469d3669c8cff95 \
+ --hash=sha256:102c04edc76b16e9dfeda5a64c1fccd7d3d2913b1544750c01d38f1ac3c04e05 \
+ --hash=sha256:230b0fb615e5b798dc4a3718999ec1828360bc71274abd14f915135eab0255f1 \
+ --hash=sha256:2f4d37b0b5c96a873887652d1cbf3f3c70821b8c66302d84b0f0d89dd6e47653 \
+ --hash=sha256:3c137eecf6bc618c2f9398bcc381474b55c817237992b169dfe728e169549e8f \
+ --hash=sha256:3d8dbf9e996629131ba3aa1afd1d8239b660d1f830c6688dd7e03157cccd6b9c \
+ --hash=sha256:4a3b50d94620e2c7c1404d1d59bc53e665883ae3fecbd856cc86da0639fd0fc3 \
+ --hash=sha256:4c8c4bbda760c654e65eaffddb1a7de71ec02e60092d33f9000521f897c99be9 \
+ --hash=sha256:521bac578448667cbb37c50bf05b53c301243ede8233029555239930996a625b \
+ --hash=sha256:6acafb3823238bbe8f4340c7ac32fb218689442e074d797bee1c5c9a02fdae75 \
+ --hash=sha256:6c093ffc593e07f7e33862824eab9225f86aa189c048dd43ffde207d7041a55f \
+ --hash=sha256:6f91930c1a284135db0f891695a263fc876466bf2afbd2215834ac08f600cfca \
+ --hash=sha256:73160799472e1a86083f786fecdf864cf43d55325492a9b5a1cfa64d8a523ecc \
+ --hash=sha256:735e06d8d0cf250dc498f54038831401063c655a8d6e5975b2527a4e7d24be3e \
+ --hash=sha256:8521544987d713941ee1e591520044d35e702f73dc87e91e6d4b15a064ae813d \
+ --hash=sha256:911b37d724a5d97396f3c2ef9ea25361c55cbc9aa18d75b12a52b620b67145af \
+ --hash=sha256:9235b3493951e11e75465d56f4cd97e3e9247f096160dd3466bfabe4cbc938bc \
+ --hash=sha256:99afac90b4cdb1471432203c3c1f74e16549c526df27056d39f41a9a47cfb4af \
+ --hash=sha256:a5810194f0f6be2e58c8d6dedc6119510df7a14280dd07ed5f0f0a85bd74816a \
+ --hash=sha256:a69afd0baa372162948b52c13f3aa2730123381edf926d7ef3f68ca7cec6d0d0 \
+ --hash=sha256:aa1b7483fac6cdec26922174fc4433f8f5c2f239b1133c5625063bb3b35957d0 \
+ --hash=sha256:bfb1f271b1523b29f324bfd223f6a4cfbdc5a2f2f16e73563671932d33663365 \
+ --hash=sha256:e03071041efd82e0317b3c45433b2f28146385b80f26f82039bc68048ac1a7a0 \
+ --hash=sha256:e189652dad6e70a0465035c55cc565c27aa38803dd4f4e74e4b952ee1c2de94b \
+ --hash=sha256:e4da451bf1c5ae381f32d430004a89f0405bc57a8471b0bddb6325a5b334aa40 \
+ --hash=sha256:ee159b41a3ae58d9c7341cf432fc74b96aaf50bd7bb1160029f657b40dc69715
+ # via
+ # onnxsim
+ # torch-inference-optimized
+onnxruntime==1.22.1 \
+ --hash=sha256:01e2f21b2793eb0c8642d2be3cee34cc7d96b85f45f6615e4e220424158877ce \
+ --hash=sha256:2d39a530aff1ec8d02e365f35e503193991417788641b184f5b1e8c9a6d5ce8d \
+ --hash=sha256:33a7980bbc4b7f446bac26c3785652fe8730ed02617d765399e89ac7d44e0f7d \
+ --hash=sha256:460487d83b7056ba98f1f7bac80287224c31d8149b15712b0d6f5078fcc33d0f \
+ --hash=sha256:6a64291d57ea966a245f749eb970f4fa05a64d26672e05a83fdb5db6b7d62f87 \
+ --hash=sha256:6e7e823624b015ea879d976cbef8bfaed2f7e2cc233d7506860a76dd37f8f381 \
+ --hash=sha256:70980d729145a36a05f74b573435531f55ef9503bcda81fc6c3d6b9306199982 \
+ --hash=sha256:7ae7526cf10f93454beb0f751e78e5cb7619e3b92f9fc3bd51aa6f3b7a8977e5 \
+ --hash=sha256:80e7f51da1f5201c1379b8d6ef6170505cd800e40da216290f5e06be01aadf95 \
+ --hash=sha256:984cea2a02fcc5dfea44ade9aca9fe0f7a8a2cd6f77c258fc4388238618f3928 \
+ --hash=sha256:a938d11c0dc811badf78e435daa3899d9af38abee950d87f3ab7430eb5b3cf5a \
+ --hash=sha256:b0c37070268ba4e02a1a9d28560cd00cd1e94f0d4f275cbef283854f861a65fa \
+ --hash=sha256:b89ddfdbbdaf7e3a59515dee657f6515601d55cb21a0f0f48c81aefc54ff1b73 \
+ --hash=sha256:bddc75868bcf6f9ed76858a632f65f7b1846bdcefc6d637b1e359c2c68609964 \
+ --hash=sha256:d29c7d87b6cbed8fecfd09dca471832384d12a69e1ab873e5effbb94adc3e966 \
+ --hash=sha256:f28a42bb322b4ca6d255531bb334a2b3e21f172e37c1741bd5e66bc4b7b61f03 \
+ --hash=sha256:f4581bccb786da68725d8eac7c63a8f31a89116b8761ff8b4989dc58b61d49a0 \
+ --hash=sha256:f6effa1299ac549a05c784d50292e3378dbbf010346ded67400193b09ddc2f04
+ # via torch-inference-optimized
+onnxsim==0.4.36 \
+ --hash=sha256:150b9a3a409af2f3161af3fecda2113e0e6e296fb015b5205a9ddf645765acad \
+ --hash=sha256:6e0ee9d6d4a83042bdef7319fbe58352d9fda5f253386be2b267c7c27f0638ee \
+ --hash=sha256:7498e7b9584c4b354b455564dfba66d460ce2c205b71dae169cfa9b6704e03fd \
+ --hash=sha256:91fb32def04f2f89d5f76527c852332366957752e5e61ac25be0b2d7bb410f89 \
+ --hash=sha256:ce87837f8975beebdcc98cc01d6d13e84b10900eb2c14035ce1066c3d670d96d \
+ --hash=sha256:f92bec8c6c0d4f8463e10021277711d2faac900e4eb890238001b3eadb5c03bc \
+ --hash=sha256:fa7596e6b806ed19077f7652788a50ee576c172b4d16d421f0593aef1a6fa4c4
+ # via torch-inference-optimized
+opencv-python==4.11.0.86 \
+ --hash=sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4 \
+ --hash=sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec \
+ --hash=sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202 \
+ --hash=sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a \
+ --hash=sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d \
+ --hash=sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b \
+ --hash=sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66
+ # via torch-inference-optimized
+packaging==25.0 \
+ --hash=sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484 \
+ --hash=sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f
+ # via
+ # black
+ # dparse
+ # huggingface-hub
+ # onnxruntime
+ # pyproject-api
+ # pytest
+ # safety
+ # safety-schemas
+ # tox
+pathspec==0.12.1 \
+ --hash=sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08 \
+ --hash=sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712
+ # via
+ # black
+ # mypy
+pbr==6.1.1 \
+ --hash=sha256:38d4daea5d9fa63b3f626131b9d34947fd0c8be9b05a29276870580050a25a76 \
+ --hash=sha256:93ea72ce6989eb2eed99d0f75721474f69ad88128afdef5ac377eb797c4bf76b
+ # via stevedore
+pillow==11.3.0 \
+ --hash=sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2 \
+ --hash=sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214 \
+ --hash=sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e \
+ --hash=sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59 \
+ --hash=sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50 \
+ --hash=sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632 \
+ --hash=sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a \
+ --hash=sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51 \
+ --hash=sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced \
+ --hash=sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f \
+ --hash=sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12 \
+ --hash=sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8 \
+ --hash=sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6 \
+ --hash=sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580 \
+ --hash=sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f \
+ --hash=sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac \
+ --hash=sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860 \
+ --hash=sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd \
+ --hash=sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722 \
+ --hash=sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8 \
+ --hash=sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673 \
+ --hash=sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788 \
+ --hash=sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542 \
+ --hash=sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e \
+ --hash=sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd \
+ --hash=sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8 \
+ --hash=sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523 \
+ --hash=sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967 \
+ --hash=sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809 \
+ --hash=sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477 \
+ --hash=sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027 \
+ --hash=sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae \
+ --hash=sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b \
+ --hash=sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c \
+ --hash=sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e \
+ --hash=sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b \
+ --hash=sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7 \
+ --hash=sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27 \
+ --hash=sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361 \
+ --hash=sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae \
+ --hash=sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d \
+ --hash=sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58 \
+ --hash=sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad \
+ --hash=sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6 \
+ --hash=sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024 \
+ --hash=sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d \
+ --hash=sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0 \
+ --hash=sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9 \
+ --hash=sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f \
+ --hash=sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874 \
+ --hash=sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa \
+ --hash=sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149 \
+ --hash=sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6 \
+ --hash=sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d \
+ --hash=sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd \
+ --hash=sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f \
+ --hash=sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c \
+ --hash=sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31 \
+ --hash=sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e \
+ --hash=sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db \
+ --hash=sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6 \
+ --hash=sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f \
+ --hash=sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494 \
+ --hash=sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69 \
+ --hash=sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94 \
+ --hash=sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77 \
+ --hash=sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7 \
+ --hash=sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a \
+ --hash=sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438 \
+ --hash=sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288 \
+ --hash=sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b \
+ --hash=sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635 \
+ --hash=sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3 \
+ --hash=sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d \
+ --hash=sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe \
+ --hash=sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0 \
+ --hash=sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe \
+ --hash=sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a \
+ --hash=sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805 \
+ --hash=sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8 \
+ --hash=sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36 \
+ --hash=sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b \
+ --hash=sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e \
+ --hash=sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25 \
+ --hash=sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12 \
+ --hash=sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c \
+ --hash=sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d \
+ --hash=sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c \
+ --hash=sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6 \
+ --hash=sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1 \
+ --hash=sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50 \
+ --hash=sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653 \
+ --hash=sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c \
+ --hash=sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4 \
+ --hash=sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3
+ # via
+ # torch-inference-optimized
+ # torchvision
+platformdirs==4.3.8 \
+ --hash=sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc \
+ --hash=sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4
+ # via
+ # black
+ # tox
+ # virtualenv
+pluggy==1.6.0 \
+ --hash=sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3 \
+ --hash=sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746
+ # via
+ # pytest
+ # pytest-cov
+ # tox
+pre-commit==4.3.0 \
+ --hash=sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8 \
+ --hash=sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16
+propcache==0.3.2 \
+ --hash=sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c \
+ --hash=sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81 \
+ --hash=sha256:06766d8f34733416e2e34f46fea488ad5d60726bb9481d3cddf89a6fa2d9603f \
+ --hash=sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6 \
+ --hash=sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535 \
+ --hash=sha256:0b8d2f607bd8f80ddc04088bc2a037fdd17884a6fcadc47a96e334d72f3717be \
+ --hash=sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba \
+ --hash=sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3 \
+ --hash=sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0 \
+ --hash=sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168 \
+ --hash=sha256:2183111651d710d3097338dd1893fcf09c9f54e27ff1a8795495a16a469cc90b \
+ --hash=sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770 \
+ --hash=sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892 \
+ --hash=sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154 \
+ --hash=sha256:2a4092e8549031e82facf3decdbc0883755d5bbcc62d3aea9d9e185549936dcf \
+ --hash=sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1 \
+ --hash=sha256:36c8d9b673ec57900c3554264e630d45980fd302458e4ac801802a7fd2ef7897 \
+ --hash=sha256:3def3da3ac3ce41562d85db655d18ebac740cb3fa4367f11a52b3da9d03a5cc3 \
+ --hash=sha256:404d70768080d3d3bdb41d0771037da19d8340d50b08e104ca0e7f9ce55fce70 \
+ --hash=sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330 \
+ --hash=sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44 \
+ --hash=sha256:4927842833830942a5d0a56e6f4839bc484785b8e1ce8d287359794818633ba0 \
+ --hash=sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88 \
+ --hash=sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3 \
+ --hash=sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43 \
+ --hash=sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4 \
+ --hash=sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1 \
+ --hash=sha256:55ffda449a507e9fbd4aca1a7d9aa6753b07d6166140e5a18d2ac9bc49eac220 \
+ --hash=sha256:5745bc7acdafa978ca1642891b82c19238eadc78ba2aaa293c6863b304e552d7 \
+ --hash=sha256:59d61f6970ecbd8ff2e9360304d5c8876a6abd4530cb752c06586849ac8a9dc9 \
+ --hash=sha256:5f559e127134b07425134b4065be45b166183fdcb433cb6c24c8e4149056ad50 \
+ --hash=sha256:5f57aa0847730daceff0497f417c9de353c575d8da3579162cc74ac294c5369e \
+ --hash=sha256:6107ddd08b02654a30fb8ad7a132021759d750a82578b94cd55ee2772b6ebea2 \
+ --hash=sha256:62180e0b8dbb6b004baec00a7983e4cc52f5ada9cd11f48c3528d8cfa7b96a66 \
+ --hash=sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1 \
+ --hash=sha256:64a67fb39229a8a8491dd42f864e5e263155e729c2e7ff723d6e25f596b1e8cb \
+ --hash=sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe \
+ --hash=sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c \
+ --hash=sha256:70bd8b9cd6b519e12859c99f3fc9a93f375ebd22a50296c3a295028bea73b9e7 \
+ --hash=sha256:7435d766f978b4ede777002e6b3b6641dd229cd1da8d3d3106a45770365f9ad9 \
+ --hash=sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e \
+ --hash=sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8 \
+ --hash=sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b \
+ --hash=sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f \
+ --hash=sha256:85871b050f174bc0bfb437efbdb68aaf860611953ed12418e4361bc9c392749e \
+ --hash=sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02 \
+ --hash=sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e \
+ --hash=sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1 \
+ --hash=sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10 \
+ --hash=sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387 \
+ --hash=sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198 \
+ --hash=sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f \
+ --hash=sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b \
+ --hash=sha256:9bec58347a5a6cebf239daba9bda37dffec5b8d2ce004d9fe4edef3d2815137e \
+ --hash=sha256:9da1cf97b92b51253d5b68cf5a2b9e0dafca095e36b7f2da335e27dc6172a614 \
+ --hash=sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252 \
+ --hash=sha256:a2dc1f4a1df4fecf4e6f68013575ff4af84ef6f478fe5344317a65d38a8e6dc9 \
+ --hash=sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c \
+ --hash=sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770 \
+ --hash=sha256:aff2e4e06435d61f11a428360a932138d0ec288b0a31dd9bd78d200bd4a2b339 \
+ --hash=sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251 \
+ --hash=sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db \
+ --hash=sha256:be29c4f4810c5789cf10ddf6af80b041c724e629fa51e308a7a0fb19ed1ef7bf \
+ --hash=sha256:c0075bf773d66fa8c9d41f66cc132ecc75e5bb9dd7cce3cfd14adc5ca184cb95 \
+ --hash=sha256:c144ca294a204c470f18cf4c9d78887810d04a3e2fbb30eea903575a779159df \
+ --hash=sha256:c5c2a784234c28854878d68978265617aa6dc0780e53d44b4d67f3651a17a9a2 \
+ --hash=sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945 \
+ --hash=sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474 \
+ --hash=sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615 \
+ --hash=sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06 \
+ --hash=sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33 \
+ --hash=sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1 \
+ --hash=sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05 \
+ --hash=sha256:e53af8cb6a781b02d2ea079b5b853ba9430fcbe18a8e3ce647d5982a3ff69f39 \
+ --hash=sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67 \
+ --hash=sha256:eef914c014bf72d18efb55619447e0aecd5fb7c2e3fa7441e2e5d6099bddff7e \
+ --hash=sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28 \
+ --hash=sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a \
+ --hash=sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394 \
+ --hash=sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725 \
+ --hash=sha256:fb075ad271405dcad8e2a7ffc9a750a3bf70e533bd86e89f0603e607b93aa64c \
+ --hash=sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206
+ # via
+ # aiohttp
+ # yarl
+protobuf==6.31.1 \
+ --hash=sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447 \
+ --hash=sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6 \
+ --hash=sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402 \
+ --hash=sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e \
+ --hash=sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9 \
+ --hash=sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39 \
+ --hash=sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a
+ # via
+ # onnx
+ # onnxruntime
+psutil==7.0.0 \
+ --hash=sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25 \
+ --hash=sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91 \
+ --hash=sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da \
+ --hash=sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34 \
+ --hash=sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553 \
+ --hash=sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456 \
+ --hash=sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993 \
+ --hash=sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99
+ # via torch-inference-optimized
+py-cpuinfo==9.0.0 \
+ --hash=sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690 \
+ --hash=sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5
+ # via pytest-benchmark
+pycparser==2.22 ; platform_python_implementation != 'PyPy' \
+ --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \
+ --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc
+ # via cffi
+pydantic==2.11.7 \
+ --hash=sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db \
+ --hash=sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b
+ # via
+ # fastapi
+ # safety
+ # safety-schemas
+ # torch-inference-optimized
+pydantic-core==2.33.2 \
+ --hash=sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d \
+ --hash=sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac \
+ --hash=sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02 \
+ --hash=sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56 \
+ --hash=sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22 \
+ --hash=sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef \
+ --hash=sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec \
+ --hash=sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d \
+ --hash=sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a \
+ --hash=sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f \
+ --hash=sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052 \
+ --hash=sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab \
+ --hash=sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916 \
+ --hash=sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c \
+ --hash=sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf \
+ --hash=sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a \
+ --hash=sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8 \
+ --hash=sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7 \
+ --hash=sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612 \
+ --hash=sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1 \
+ --hash=sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7 \
+ --hash=sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a \
+ --hash=sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b \
+ --hash=sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7 \
+ --hash=sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025 \
+ --hash=sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849 \
+ --hash=sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b \
+ --hash=sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa \
+ --hash=sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e \
+ --hash=sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea \
+ --hash=sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac \
+ --hash=sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51 \
+ --hash=sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e \
+ --hash=sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162 \
+ --hash=sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65 \
+ --hash=sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2 \
+ --hash=sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b \
+ --hash=sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de \
+ --hash=sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc \
+ --hash=sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb \
+ --hash=sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d \
+ --hash=sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef \
+ --hash=sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1 \
+ --hash=sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5 \
+ --hash=sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88 \
+ --hash=sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290 \
+ --hash=sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d \
+ --hash=sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808 \
+ --hash=sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc \
+ --hash=sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc \
+ --hash=sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e \
+ --hash=sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640 \
+ --hash=sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30 \
+ --hash=sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e \
+ --hash=sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9 \
+ --hash=sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9 \
+ --hash=sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f \
+ --hash=sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5 \
+ --hash=sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab \
+ --hash=sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572 \
+ --hash=sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593 \
+ --hash=sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29 \
+ --hash=sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1 \
+ --hash=sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f \
+ --hash=sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8 \
+ --hash=sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf \
+ --hash=sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246 \
+ --hash=sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9 \
+ --hash=sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011 \
+ --hash=sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a \
+ --hash=sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6 \
+ --hash=sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8 \
+ --hash=sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a \
+ --hash=sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2 \
+ --hash=sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c \
+ --hash=sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6 \
+ --hash=sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d
+ # via pydantic
+pygments==2.19.2 \
+ --hash=sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887 \
+ --hash=sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b
+ # via
+ # pytest
+ # rich
+pyproject-api==1.9.1 \
+ --hash=sha256:43c9918f49daab37e302038fc1aed54a8c7a91a9fa935d00b9a485f37e0f5335 \
+ --hash=sha256:7d6238d92f8962773dd75b5f0c4a6a27cce092a14b623b811dba656f3b628948
+ # via tox
+pyreadline3==3.5.4 ; sys_platform == 'win32' \
+ --hash=sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7 \
+ --hash=sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6
+ # via humanfriendly
+pytest==8.4.1 \
+ --hash=sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7 \
+ --hash=sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c
+ # via
+ # pytest-asyncio
+ # pytest-benchmark
+ # pytest-cov
+ # pytest-html
+ # pytest-json-report
+ # pytest-metadata
+ # pytest-mock
+ # pytest-timeout
+ # pytest-xdist
+pytest-asyncio==1.1.0 \
+ --hash=sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf \
+ --hash=sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea
+pytest-benchmark==5.1.0 \
+ --hash=sha256:922de2dfa3033c227c96da942d1878191afa135a29485fb942e85dff1c592c89 \
+ --hash=sha256:9ea661cdc292e8231f7cd4c10b0319e56a2118e2c09d9f50e1b3d150d2aca105
+pytest-cov==6.2.1 \
+ --hash=sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2 \
+ --hash=sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5
+pytest-html==4.1.1 \
+ --hash=sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07 \
+ --hash=sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71
+pytest-json-report==1.5.0 \
+ --hash=sha256:2dde3c647851a19b5f3700729e8310a6e66efb2077d674f27ddea3d34dc615de \
+ --hash=sha256:9897b68c910b12a2e48dd849f9a284b2c79a732a8a9cb398452ddd23d3c8c325
+pytest-metadata==3.1.1 \
+ --hash=sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b \
+ --hash=sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8
+ # via
+ # pytest-html
+ # pytest-json-report
+pytest-mock==3.14.1 \
+ --hash=sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e \
+ --hash=sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0
+pytest-timeout==2.4.0 \
+ --hash=sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a \
+ --hash=sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2
+pytest-xdist==3.8.0 \
+ --hash=sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88 \
+ --hash=sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1
+pyyaml==6.0.2 \
+ --hash=sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48 \
+ --hash=sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086 \
+ --hash=sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133 \
+ --hash=sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5 \
+ --hash=sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484 \
+ --hash=sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee \
+ --hash=sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5 \
+ --hash=sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68 \
+ --hash=sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf \
+ --hash=sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99 \
+ --hash=sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85 \
+ --hash=sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc \
+ --hash=sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1 \
+ --hash=sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317 \
+ --hash=sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c \
+ --hash=sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652 \
+ --hash=sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5 \
+ --hash=sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e \
+ --hash=sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b \
+ --hash=sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8 \
+ --hash=sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476 \
+ --hash=sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563 \
+ --hash=sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237 \
+ --hash=sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b \
+ --hash=sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180 \
+ --hash=sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425 \
+ --hash=sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e \
+ --hash=sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183 \
+ --hash=sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab \
+ --hash=sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774 \
+ --hash=sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725 \
+ --hash=sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e \
+ --hash=sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44 \
+ --hash=sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed \
+ --hash=sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4 \
+ --hash=sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba \
+ --hash=sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4
+ # via
+ # bandit
+ # huggingface-hub
+ # pre-commit
+ # torch-inference-optimized
+requests==2.32.4 \
+ --hash=sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c \
+ --hash=sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422
+ # via
+ # huggingface-hub
+ # safety
+ # torch-inference-optimized
+rich==14.1.0 \
+ --hash=sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f \
+ --hash=sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8
+ # via
+ # bandit
+ # onnxsim
+ # safety
+ # typer
+ruamel-yaml==0.18.14 \
+ --hash=sha256:710ff198bb53da66718c7db27eec4fbcc9aa6ca7204e4c1df2f282b6fe5eb6b2 \
+ --hash=sha256:7227b76aaec364df15936730efbf7d72b30c0b79b1d578bbb8e3dcb2d81f52b7
+ # via
+ # safety
+ # safety-schemas
+ruamel-yaml-clib==0.2.12 ; python_full_version < '3.14' and platform_python_implementation == 'CPython' \
+ --hash=sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4 \
+ --hash=sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef \
+ --hash=sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5 \
+ --hash=sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3 \
+ --hash=sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632 \
+ --hash=sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6 \
+ --hash=sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680 \
+ --hash=sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf \
+ --hash=sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da \
+ --hash=sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6 \
+ --hash=sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a \
+ --hash=sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01 \
+ --hash=sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6 \
+ --hash=sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f \
+ --hash=sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd \
+ --hash=sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2 \
+ --hash=sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52 \
+ --hash=sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd \
+ --hash=sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d \
+ --hash=sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c \
+ --hash=sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6 \
+ --hash=sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb \
+ --hash=sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a \
+ --hash=sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969 \
+ --hash=sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28 \
+ --hash=sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d \
+ --hash=sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e \
+ --hash=sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4 \
+ --hash=sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31 \
+ --hash=sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642 \
+ --hash=sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e \
+ --hash=sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1 \
+ --hash=sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3 \
+ --hash=sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475 \
+ --hash=sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5 \
+ --hash=sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76 \
+ --hash=sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df
+ # via ruamel-yaml
+ruff==0.12.8 \
+ --hash=sha256:0ac9c570634b98c71c88cb17badd90f13fc076a472ba6ef1d113d8ed3df109fb \
+ --hash=sha256:2fae54e752a3150f7ee0e09bce2e133caf10ce9d971510a9b925392dc98d2fec \
+ --hash=sha256:45c32487e14f60b88aad6be9fd5da5093dbefb0e3e1224131cb1d441d7cb7d46 \
+ --hash=sha256:49ebcaccc2bdad86fd51b7864e3d808aad404aab8df33d469b6e65584656263a \
+ --hash=sha256:4cb3a45525176e1009b2b64126acf5f9444ea59066262791febf55e40493a033 \
+ --hash=sha256:560e0cd641e45591a3e42cb50ef61ce07162b9c233786663fdce2d8557d99818 \
+ --hash=sha256:63cb5a5e933fc913e5823a0dfdc3c99add73f52d139d6cd5cc8639d0e0465513 \
+ --hash=sha256:71c83121512e7743fba5a8848c261dcc454cafb3ef2934a43f1b7a4eb5a447ea \
+ --hash=sha256:7209531f1a1fcfbe8e46bcd7ab30e2f43604d8ba1c49029bb420b103d0b5f76e \
+ --hash=sha256:9a9bbe28f9f551accf84a24c366c1aa8774d6748438b47174f8e8565ab9dedbc \
+ --hash=sha256:a2cab5f60d5b65b50fba39a8950c8746df1627d54ba1197f970763917184b161 \
+ --hash=sha256:ae3e7504666ad4c62f9ac8eedb52a93f9ebdeb34742b8b71cd3cccd24912719f \
+ --hash=sha256:c0acbcf01206df963d9331b5838fb31f3b44fa979ee7fa368b9b9057d89f4a53 \
+ --hash=sha256:c90e1a334683ce41b0e7a04f41790c429bf5073b62c1ae701c9dc5b3d14f0749 \
+ --hash=sha256:cb82efb5d35d07497813a1c5647867390a7d83304562607f3579602fa3d7d46f \
+ --hash=sha256:daf3475060a617fd5bc80638aeaf2f5937f10af3ec44464e280a9d2218e720d3 \
+ --hash=sha256:dbea798fc0065ad0b84a2947b0aff4233f0cb30f226f00a2c5850ca4393de609 \
+ --hash=sha256:de4429ef2ba091ecddedd300f4c3f24bca875d3d8b23340728c3cb0da81072c3
+safety==3.2.4 \
+ --hash=sha256:242ff7ae448d7fb2ea455c90f44e3f2ca45be9c8559b2fe9dfc89617164a0f17 \
+ --hash=sha256:bac0202016d736a2118057964a0e3983fa20ff2563fd103cac3f3ac1ed3fea11
+safety-schemas==0.0.5 \
+ --hash=sha256:0de5fc9a53d4423644a8ce9a17a2e474714aa27e57f3506146e95a41710ff104 \
+ --hash=sha256:6ac9eb71e60f0d4e944597c01dd48d6d8cd3d467c94da4aba3702a05a3a6ab4f
+ # via safety
+setuptools==80.9.0 \
+ --hash=sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922 \
+ --hash=sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c
+ # via
+ # pbr
+ # safety
+ # torch
+ # triton
+shellingham==1.5.4 \
+ --hash=sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686 \
+ --hash=sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de
+ # via typer
+sniffio==1.3.1 \
+ --hash=sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2 \
+ --hash=sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc
+ # via anyio
+starlette==0.47.2 \
+ --hash=sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8 \
+ --hash=sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b
+ # via fastapi
+stevedore==5.4.1 \
+ --hash=sha256:3135b5ae50fe12816ef291baff420acb727fcd356106e3e9cbfa9e5985cd6f4b \
+ --hash=sha256:d10a31c7b86cba16c1f6e8d15416955fc797052351a56af15e608ad20811fcfe
+ # via bandit
+sympy==1.14.0 \
+ --hash=sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517 \
+ --hash=sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5
+ # via
+ # onnxruntime
+ # torch
+tomli==2.2.1 ; python_full_version <= '3.11' \
+ --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \
+ --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \
+ --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \
+ --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \
+ --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \
+ --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \
+ --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \
+ --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \
+ --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \
+ --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \
+ --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \
+ --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \
+ --hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \
+ --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \
+ --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \
+ --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \
+ --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \
+ --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \
+ --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \
+ --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \
+ --hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \
+ --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \
+ --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \
+ --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \
+ --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \
+ --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \
+ --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \
+ --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \
+ --hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \
+ --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \
+ --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \
+ --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7
+ # via
+ # bandit
+ # black
+ # coverage
+ # dparse
+ # mypy
+ # pyproject-api
+ # pytest
+ # tox
+torch==2.7.1 ; platform_machine == 'aarch64' and 'tegra' in platform_release \
+ --hash=sha256:03563603d931e70722dce0e11999d53aa80a375a3d78e6b39b9f6805ea0a8d28 \
+ --hash=sha256:0da4f4dba9f65d0d203794e619fe7ca3247a55ffdcbd17ae8fb83c8b2dc9b585 \
+ --hash=sha256:236f501f2e383f1cb861337bdf057712182f910f10aeaf509065d54d339e49b2 \
+ --hash=sha256:27ea1e518df4c9de73af7e8a720770f3628e7f667280bce2be7a16292697e3fa \
+ --hash=sha256:787687087412c4bd68d315e39bc1223f08aae1d16a9e9771d95eabbb04ae98fb \
+ --hash=sha256:988b0cbc4333618a1056d2ebad9eb10089637b659eb645434d0809d8d937b946 \
+ --hash=sha256:a103b5d782af5bd119b81dbcc7ffc6fa09904c423ff8db397a1e6ea8fd71508f \
+ --hash=sha256:aea4fc1bf433d12843eb2c6b2204861f43d8364597697074c8d38ae2507f8730 \
+ --hash=sha256:d72acfdb86cee2a32c0ce0101606f3758f0d8bb5f8f31e7920dc2809e963aa7c \
+ --hash=sha256:e08d7e6f21a617fe38eeb46dd2213ded43f27c072e9165dc27300c9ef9570934
+ # via
+ # torch-inference-optimized
+ # torchaudio
+ # torchvision
+torch==2.8.0 ; platform_machine != 'aarch64' or 'tegra' not in platform_release \
+ --hash=sha256:0be92c08b44009d4131d1ff7a8060d10bafdb7ddcb7359ef8d8c5169007ea905 \
+ --hash=sha256:1a62a1ec4b0498930e2543535cf70b1bef8c777713de7ceb84cd79115f553767 \
+ --hash=sha256:220a06fd7af8b653c35d359dfe1aaf32f65aa85befa342629f716acb134b9710 \
+ --hash=sha256:2b2f96814e0345f5a5aed9bf9734efa913678ed19caf6dc2cddb7930672d6128 \
+ --hash=sha256:2f4ac52f0130275d7517b03a33d2493bab3693c83dcfadf4f81688ea82147d2e \
+ --hash=sha256:5ae0524688fb6707c57a530c2325e13bb0090b745ba7b4a2cd6a3ce262572916 \
+ --hash=sha256:619c2869db3ada2c0105487ba21b5008defcc472d23f8b80ed91ac4a380283b0 \
+ --hash=sha256:65616ca8ec6f43245e1f5f296603e33923f4c30f93d65e103d9e50c25b35150b \
+ --hash=sha256:659df54119ae03e83a800addc125856effda88b016dfc54d9f65215c3975be16 \
+ --hash=sha256:7b677e17f5a3e69fdef7eb3b9da72622f8d322692930297e4ccb52fefc6c8211 \
+ --hash=sha256:83c13411a26fac3d101fe8035a6b0476ae606deb8688e904e796a3534c197def \
+ --hash=sha256:89aa9ee820bb39d4d72b794345cccef106b574508dd17dbec457949678c76011 \
+ --hash=sha256:8c7ef765e27551b2fbfc0f41bcf270e1292d9bf79f8e0724848b1682be6e80aa \
+ --hash=sha256:8f0a9d617a66509ded240add3754e462430a6c1fc5589f86c17b433dd808f97a \
+ --hash=sha256:a3f16a58a9a800f589b26d47ee15aca3acf065546137fc2af039876135f4c760 \
+ --hash=sha256:a7242b86f42be98ac674b88a4988643b9bc6145437ec8f048fea23f72feb5eca \
+ --hash=sha256:b2aca0939fb7e4d842561febbd4ffda67a8e958ff725c1c27e244e85e982173c \
+ --hash=sha256:c12fa219f51a933d5f80eeb3a7a5d0cbe9168c0a14bbb4055f1979431660879b \
+ --hash=sha256:e2fab4153768d433f8ed9279c8133a114a034a61e77a3a104dcdf54388838705 \
+ --hash=sha256:e8e5bf982e87e2b59d932769938b698858c64cc53753894be25629bdf5cf2f46
+ # via
+ # torch-inference-optimized
+ # torchaudio
+ # torchvision
+torchaudio==2.7.1 ; platform_machine == 'aarch64' and 'tegra' in platform_release \
+ --hash=sha256:1850475ef9101ea0b3593fe93ff6ee4e7a20598f6da6510761220b9fe56eb7fa \
+ --hash=sha256:18560955b8beb2a8d39a6bfae20a442337afcefb3dfd4ee007ce82233a796799 \
+ --hash=sha256:271f717844e5c7f9e05c8328de817bf90f46d83281c791e94f54d4edea2f5817 \
+ --hash=sha256:4739af57d0eb94347d1c6a1b5668be78a7383afe826dde18a04883b9f9f263b1 \
+ --hash=sha256:53bc4ba12e7468be34a7ca2ee837ee5c8bd5755b25c12f665af9339cae37e265 \
+ --hash=sha256:9306dcfc4586cebd7647a93fe9a448e791c4f83934da616b9433b75597a1f978 \
+ --hash=sha256:c089dbfc14c5f47091b7bf3f6bf2bbac93b86619299d04d9c102f4ad53758990 \
+ --hash=sha256:d5a62f88c629035913f506df03f710c48fc8bb9637191933f27c67088d5ca136 \
+ --hash=sha256:d66bd76b226fdd4135c97650e1b7eb63fb7659b4ed0e3a778898e41dbba21b61 \
+ --hash=sha256:e5f0599a507f4683546878ed9667e1b32d7ca3c8a957e4c15c6b302378ef4dee
+ # via torch-inference-optimized
+torchaudio==2.8.0 ; platform_machine != 'aarch64' or 'tegra' not in platform_release \
+ --hash=sha256:078105bf80f725c0215a0bebac8cb2fb1b3993ab32bdc3fcd50145a5b4127001 \
+ --hash=sha256:09535a9b727c0793cd07c1ace99f3f353626281bcc3e30c2f2314e3ebc9d3f96 \
+ --hash=sha256:1951f10ed092f2dda57634f6a3950ef21c9d9352551aa84a9fccd51bbda18095 \
+ --hash=sha256:4573c6042950c20278e3608a9a38050ba0bc72e0049e1bbfd249caf859a8029b \
+ --hash=sha256:4b82cacd1b8ccd543b1149d8cab257a40dfda8119023d2e3a96c66349c84bffb \
+ --hash=sha256:4e2b4712ad6d7547ce82d84567c8c29d5e2966ff1d31d94e1644024fb4b2649f \
+ --hash=sha256:4f7d97494698d98854129349b12061e8c3398d33bd84c929fa9aed5fd1389f73 \
+ --hash=sha256:58f912bf2d289c709b42a55475b2b483becec79d9affb7684b606bb1f896b434 \
+ --hash=sha256:68df9c9068984edff8065c2b6656725e6114fe89281b0cf122c7505305fc98a4 \
+ --hash=sha256:776c0b4ba84b9e3ddf6304b9c47cd63549d7896a6f3d5184ece074cc3d76ed6b \
+ --hash=sha256:862e2e40bf09d865e5df080a84c1a39bbcef40e43140f4b1737eb3a389d3b38f \
+ --hash=sha256:93a8583f280fe83ba021aa713319381ea71362cc87b67ee38e97a43cb2254aee \
+ --hash=sha256:c1b5139c840367a7855a062a06688a416619f6fd2ca46d9b9299b49a7d133dfd \
+ --hash=sha256:c2f44cf279f673cfcdd8f576c349eee8bedf8caab351a5dd78b32970cc34a212 \
+ --hash=sha256:c9276857d241c6de257af765c0f51fc011af38cb725401495121b280913007cf \
+ --hash=sha256:d2a85b124494736241884372fe1c6dd8c15e9bc1931bd325838c5c00238c7378 \
+ --hash=sha256:d3c1b85b26a09832d139f6d6da6b66caeb51d2e16e08f8587665c44a9e1aa8f9 \
+ --hash=sha256:d4a715d09ac28c920d031ee1e60ecbc91e8a5079ad8c61c0277e658436c821a6 \
+ --hash=sha256:ddef94bf181e6447cbb05f38beaca8f6c5bb8d2b9ddced1aa3452025b9fc70d3 \
+ --hash=sha256:f851d32e94ca05e470f0c60e25726ec1e0eb71cb2ca5a0206b7fd03272ccc3c8
+ # via torch-inference-optimized
+torchvision==0.22.1 ; platform_machine == 'aarch64' and 'tegra' in platform_release \
+ --hash=sha256:043d9e35ed69c2e586aff6eb9e2887382e7863707115668ac9d140da58f42cba \
+ --hash=sha256:153f1790e505bd6da123e21eee6e83e2e155df05c0fe7d56347303067d8543c5 \
+ --hash=sha256:27142bcc8a984227a6dcf560985e83f52b82a7d3f5fe9051af586a2ccc46ef26 \
+ --hash=sha256:3b47d8369ee568c067795c0da0b4078f39a9dfea6f3bc1f3ac87530dfda1dd56 \
+ --hash=sha256:4a614a6a408d2ed74208d0ea6c28a2fbb68290e9a7df206c5fef3f0b6865d307 \
+ --hash=sha256:4addf626e2b57fc22fd6d329cf1346d474497672e6af8383b7b5b636fba94a53 \
+ --hash=sha256:8b4a53a6067d63adba0c52f2b8dd2290db649d642021674ee43c0c922f0c6a69 \
+ --hash=sha256:964414eef19459d55a10e886e2fca50677550e243586d1678f65e3f6f6bac47a \
+ --hash=sha256:990de4d657a41ed71680cd8be2e98ebcab55371f30993dc9bd2e676441f7180e \
+ --hash=sha256:9c3ae3319624c43cc8127020f46c14aa878406781f0899bb6283ae474afeafbf
+ # via torch-inference-optimized
+torchvision==0.23.0 ; platform_machine != 'aarch64' or 'tegra' not in platform_release \
+ --hash=sha256:01dc33ee24c79148aee7cdbcf34ae8a3c9da1674a591e781577b716d233b1fa6 \
+ --hash=sha256:07d069cb29691ff566e3b7f11f20d91044f079e1dbdc9d72e0655899a9b06938 \
+ --hash=sha256:09bfde260e7963a15b80c9e442faa9f021c7e7f877ac0a36ca6561b367185013 \
+ --hash=sha256:1c37e325e09a184b730c3ef51424f383ec5745378dc0eca244520aca29722600 \
+ --hash=sha256:2a3299d2b1d5a7aed2d3b6ffb69c672ca8830671967eb1cee1497bacd82fe47b \
+ --hash=sha256:2df618e1143805a7673aaf82cb5720dd9112d4e771983156aaf2ffff692eebf9 \
+ --hash=sha256:2f7fd6c15f3697e80627b77934f77705f3bc0e98278b989b2655de01f6903e1d \
+ --hash=sha256:31c583ba27426a3a04eca8c05450524105c1564db41be6632f7536ef405a6de2 \
+ --hash=sha256:35c27941831b653f5101edfe62c03d196c13f32139310519e8228f35eae0e96a \
+ --hash=sha256:3932bf67256f2d095ce90a9f826f6033694c818856f4bb26794cf2ce64253e53 \
+ --hash=sha256:49aa20e21f0c2bd458c71d7b449776cbd5f16693dd5807195a820612b8a229b7 \
+ --hash=sha256:4e7d31c43bc7cbecbb1a5652ac0106b436aa66e26437585fc2c4b2cf04d6014c \
+ --hash=sha256:6dd7c4d329a0e03157803031bc856220c6155ef08c26d4f5bbac938acecf0948 \
+ --hash=sha256:7266871daca00ad46d1c073e55d972179d12a58fa5c9adec9a3db9bbed71284a \
+ --hash=sha256:76bc4c0b63d5114aa81281390f8472a12a6a35ce9906e67ea6044e5af4cab60c \
+ --hash=sha256:83ee5bf827d61a8af14620c0a61d8608558638ac9c3bac8adb7b27138e2147d1 \
+ --hash=sha256:a2e45272abe7b8bf0d06c405e78521b5757be1bd0ed7e5cd78120f7fdd4cbf35 \
+ --hash=sha256:a76fafe113b2977be3a21bf78f115438c1f88631d7a87203acb3dd6ae55889e6 \
+ --hash=sha256:b9e2dabf0da9c8aa9ea241afb63a8f3e98489e706b22ac3f30416a1be377153b \
+ --hash=sha256:e0e2c04a91403e8dd3af9756c6a024a1d9c0ed9c0d592a8314ded8f4fe30d440
+ # via torch-inference-optimized
+tox==4.11.4 \
+ --hash=sha256:2adb83d68f27116812b69aa36676a8d6a52249cb0d173649de0e7d0c2e3e7229 \
+ --hash=sha256:73a7240778fabf305aeb05ab8ea26e575e042ab5a18d71d0ed13e343a51d6ce1
+tqdm==4.67.1 \
+ --hash=sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2 \
+ --hash=sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2
+ # via
+ # huggingface-hub
+ # torch-inference-optimized
+triton==3.4.0 ; platform_machine == 'x86_64' and sys_platform == 'linux' \
+ --hash=sha256:00be2964616f4c619193cb0d1b29a99bd4b001d7dc333816073f92cf2a8ccdeb \
+ --hash=sha256:31c1d84a5c0ec2c0f8e8a072d7fd150cab84a9c239eaddc6706c081bfae4eb04 \
+ --hash=sha256:7936b18a3499ed62059414d7df563e6c163c5e16c3773678a3ee3d417865035d \
+ --hash=sha256:7b70f5e6a41e52e48cfc087436c8a28c17ff98db369447bcaff3b887a3ab4467 \
+ --hash=sha256:7ff2785de9bc02f500e085420273bb5cc9c9bb767584a4aa28d6e360cec70128
+ # via torch
+typer==0.16.0 \
+ --hash=sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855 \
+ --hash=sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b
+ # via safety
+typing-extensions==4.14.1 \
+ --hash=sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36 \
+ --hash=sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76
+ # via
+ # aiosignal
+ # anyio
+ # black
+ # exceptiongroup
+ # fastapi
+ # huggingface-hub
+ # marshmallow
+ # multidict
+ # mypy
+ # onnx
+ # pydantic
+ # pydantic-core
+ # safety
+ # safety-schemas
+ # starlette
+ # torch
+ # typer
+ # typing-inspection
+ # uvicorn
+typing-inspection==0.4.1 \
+ --hash=sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51 \
+ --hash=sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28
+ # via pydantic
+urllib3==2.5.0 \
+ --hash=sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760 \
+ --hash=sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc
+ # via
+ # requests
+ # safety
+uvicorn==0.35.0 \
+ --hash=sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a \
+ --hash=sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01
+ # via torch-inference-optimized
+virtualenv==20.33.1 \
+ --hash=sha256:07c19bc66c11acab6a5958b815cbcee30891cd1c2ccf53785a28651a0d8d8a67 \
+ --hash=sha256:1b44478d9e261b3fb8baa5e74a0ca3bc0e05f21aa36167bf9cbf850e542765b8
+ # via
+ # pre-commit
+ # tox
+yarl==1.20.1 \
+ --hash=sha256:03aa1e041727cb438ca762628109ef1333498b122e4c76dd858d186a37cec845 \
+ --hash=sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53 \
+ --hash=sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a \
+ --hash=sha256:0c869f2651cc77465f6cd01d938d91a11d9ea5d798738c1dc077f3de0b5e5fed \
+ --hash=sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2 \
+ --hash=sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02 \
+ --hash=sha256:12e768f966538e81e6e7550f9086a6236b16e26cd964cf4df35349970f3551cf \
+ --hash=sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3 \
+ --hash=sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef \
+ --hash=sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04 \
+ --hash=sha256:1f8a891e4a22a89f5dde7862994485e19db246b70bb288d3ce73a34422e55b23 \
+ --hash=sha256:21242b4288a6d56f04ea193adde174b7e347ac46ce6bc84989ff7c1b1ecea84e \
+ --hash=sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6 \
+ --hash=sha256:26ef53a9e726e61e9cd1cda6b478f17e350fb5800b4bd1cd9fe81c4d91cfeb2e \
+ --hash=sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a \
+ --hash=sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a \
+ --hash=sha256:33f29ecfe0330c570d997bcf1afd304377f2e48f61447f37e846a6058a4d33b2 \
+ --hash=sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458 \
+ --hash=sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc \
+ --hash=sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d \
+ --hash=sha256:41ebd28167bc6af8abb97fec1a399f412eec5fd61a3ccbe2305a18b84fb4ca73 \
+ --hash=sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7 \
+ --hash=sha256:46b5e0ccf1943a9a6e766b2c2b8c732c55b34e28be57d8daa2b3c1d1d4009309 \
+ --hash=sha256:47ee6188fea634bdfaeb2cc420f5b3b17332e6225ce88149a17c413c77ff269e \
+ --hash=sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698 \
+ --hash=sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c \
+ --hash=sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691 \
+ --hash=sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16 \
+ --hash=sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f \
+ --hash=sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f \
+ --hash=sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004 \
+ --hash=sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3 \
+ --hash=sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28 \
+ --hash=sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513 \
+ --hash=sha256:597f40615b8d25812f14562699e287f0dcc035d25eb74da72cae043bb884d773 \
+ --hash=sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4 \
+ --hash=sha256:62915e6688eb4d180d93840cda4110995ad50c459bf931b8b3775b37c264af1e \
+ --hash=sha256:642980ef5e0fa1de5fa96d905c7e00cb2c47cb468bfcac5a18c58e27dbf8d8d1 \
+ --hash=sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31 \
+ --hash=sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16 \
+ --hash=sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819 \
+ --hash=sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3 \
+ --hash=sha256:6c4fbf6b02d70e512d7ade4b1f998f237137f1417ab07ec06358ea04f69134f8 \
+ --hash=sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf \
+ --hash=sha256:76d12524d05841276b0e22573f28d5fbcb67589836772ae9244d90dd7d66aa13 \
+ --hash=sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1 \
+ --hash=sha256:7a8900a42fcdaad568de58887c7b2f602962356908eedb7628eaf6021a6e435b \
+ --hash=sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f \
+ --hash=sha256:812303eb4aa98e302886ccda58d6b099e3576b1b9276161469c25803a8db277d \
+ --hash=sha256:835ab2cfc74d5eb4a6a528c57f05688099da41cf4957cf08cad38647e4a83b30 \
+ --hash=sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77 \
+ --hash=sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a \
+ --hash=sha256:8601bc010d1d7780592f3fc1bdc6c72e2b6466ea34569778422943e1a1f3c389 \
+ --hash=sha256:86971e2795584fe8c002356d3b97ef6c61862720eeff03db2a7c86b678d85b3e \
+ --hash=sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e \
+ --hash=sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c \
+ --hash=sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1 \
+ --hash=sha256:8f969afbb0a9b63c18d0feecf0db09d164b7a44a053e78a7d05f5df163e43833 \
+ --hash=sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b \
+ --hash=sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee \
+ --hash=sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38 \
+ --hash=sha256:98c4a7d166635147924aa0bf9bfe8d8abad6fffa6102de9c99ea04a1376f91e8 \
+ --hash=sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd \
+ --hash=sha256:aef6c4d69554d44b7f9d923245f8ad9a707d971e6209d51279196d8e8fe1ae16 \
+ --hash=sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d \
+ --hash=sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a \
+ --hash=sha256:b982fa7f74c80d5c0c7b5b38f908971e513380a10fecea528091405f519b9ebb \
+ --hash=sha256:bad6d131fda8ef508b36be3ece16d0902e80b88ea7200f030a0f6c11d9e508d4 \
+ --hash=sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9 \
+ --hash=sha256:bea21cdae6c7eb02ba02a475f37463abfe0a01f5d7200121b03e605d6a0439f8 \
+ --hash=sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390 \
+ --hash=sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8 \
+ --hash=sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be \
+ --hash=sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac \
+ --hash=sha256:d0f6500f69e8402d513e5eedb77a4e1818691e8f45e6b687147963514d84b44b \
+ --hash=sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5 \
+ --hash=sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4 \
+ --hash=sha256:daadbdc1f2a9033a2399c42646fbd46da7992e868a5fe9513860122d7fe7a73f \
+ --hash=sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5 \
+ --hash=sha256:dd803820d44c8853a109a34e3660e5a61beae12970da479cf44aa2954019bf70 \
+ --hash=sha256:df018d92fe22aaebb679a7f89fe0c0f368ec497e3dda6cb81a567610f04501f1 \
+ --hash=sha256:df47c55f7d74127d1b11251fe6397d84afdde0d53b90bedb46a23c0e534f9d24 \
+ --hash=sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653 \
+ --hash=sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d \
+ --hash=sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7 \
+ --hash=sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce \
+ --hash=sha256:fe41919b9d899661c5c28a8b4b0acf704510b88f27f0934ac7a7bebdd8938d5e
+ # via aiohttp
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..0a3da55
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,3707 @@
+version = 1
+revision = 3
+requires-python = ">=3.10"
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+
+[[package]]
+name = "aiohappyeyeballs"
+version = "2.6.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" },
+]
+
+[[package]]
+name = "aiohttp"
+version = "3.12.15"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "aiohappyeyeballs" },
+ { name = "aiosignal" },
+ { name = "async-timeout", marker = "python_full_version < '3.11'" },
+ { name = "attrs" },
+ { name = "frozenlist" },
+ { name = "multidict" },
+ { name = "propcache" },
+ { name = "yarl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/9b/e7/d92a237d8802ca88483906c388f7c201bbe96cd80a165ffd0ac2f6a8d59f/aiohttp-3.12.15.tar.gz", hash = "sha256:4fc61385e9c98d72fcdf47e6dd81833f47b2f77c114c29cd64a361be57a763a2", size = 7823716, upload-time = "2025-07-29T05:52:32.215Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/47/dc/ef9394bde9080128ad401ac7ede185267ed637df03b51f05d14d1c99ad67/aiohttp-3.12.15-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b6fc902bff74d9b1879ad55f5404153e2b33a82e72a95c89cec5eb6cc9e92fbc", size = 703921, upload-time = "2025-07-29T05:49:43.584Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/42/63fccfc3a7ed97eb6e1a71722396f409c46b60a0552d8a56d7aad74e0df5/aiohttp-3.12.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:098e92835b8119b54c693f2f88a1dec690e20798ca5f5fe5f0520245253ee0af", size = 480288, upload-time = "2025-07-29T05:49:47.851Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/a2/7b8a020549f66ea2a68129db6960a762d2393248f1994499f8ba9728bbed/aiohttp-3.12.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:40b3fee496a47c3b4a39a731954c06f0bd9bd3e8258c059a4beb76ac23f8e421", size = 468063, upload-time = "2025-07-29T05:49:49.789Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/f5/d11e088da9176e2ad8220338ae0000ed5429a15f3c9dfd983f39105399cd/aiohttp-3.12.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ce13fcfb0bb2f259fb42106cdc63fa5515fb85b7e87177267d89a771a660b79", size = 1650122, upload-time = "2025-07-29T05:49:51.874Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/6b/b60ce2757e2faed3d70ed45dafee48cee7bfb878785a9423f7e883f0639c/aiohttp-3.12.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3beb14f053222b391bf9cf92ae82e0171067cc9c8f52453a0f1ec7c37df12a77", size = 1624176, upload-time = "2025-07-29T05:49:53.805Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/de/8c9fde2072a1b72c4fadecf4f7d4be7a85b1d9a4ab333d8245694057b4c6/aiohttp-3.12.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c39e87afe48aa3e814cac5f535bc6199180a53e38d3f51c5e2530f5aa4ec58c", size = 1696583, upload-time = "2025-07-29T05:49:55.338Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/ad/07f863ca3d895a1ad958a54006c6dafb4f9310f8c2fdb5f961b8529029d3/aiohttp-3.12.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5f1b4ce5bc528a6ee38dbf5f39bbf11dd127048726323b72b8e85769319ffc4", size = 1738896, upload-time = "2025-07-29T05:49:57.045Z" },
+ { url = "https://files.pythonhosted.org/packages/20/43/2bd482ebe2b126533e8755a49b128ec4e58f1a3af56879a3abdb7b42c54f/aiohttp-3.12.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1004e67962efabbaf3f03b11b4c43b834081c9e3f9b32b16a7d97d4708a9abe6", size = 1643561, upload-time = "2025-07-29T05:49:58.762Z" },
+ { url = "https://files.pythonhosted.org/packages/23/40/2fa9f514c4cf4cbae8d7911927f81a1901838baf5e09a8b2c299de1acfe5/aiohttp-3.12.15-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8faa08fcc2e411f7ab91d1541d9d597d3a90e9004180edb2072238c085eac8c2", size = 1583685, upload-time = "2025-07-29T05:50:00.375Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/c3/94dc7357bc421f4fb978ca72a201a6c604ee90148f1181790c129396ceeb/aiohttp-3.12.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:fe086edf38b2222328cdf89af0dde2439ee173b8ad7cb659b4e4c6f385b2be3d", size = 1627533, upload-time = "2025-07-29T05:50:02.306Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/3f/1f8911fe1844a07001e26593b5c255a685318943864b27b4e0267e840f95/aiohttp-3.12.15-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:79b26fe467219add81d5e47b4a4ba0f2394e8b7c7c3198ed36609f9ba161aecb", size = 1638319, upload-time = "2025-07-29T05:50:04.282Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/46/27bf57a99168c4e145ffee6b63d0458b9c66e58bb70687c23ad3d2f0bd17/aiohttp-3.12.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b761bac1192ef24e16706d761aefcb581438b34b13a2f069a6d343ec8fb693a5", size = 1613776, upload-time = "2025-07-29T05:50:05.863Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/7e/1d2d9061a574584bb4ad3dbdba0da90a27fdc795bc227def3a46186a8bc1/aiohttp-3.12.15-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e153e8adacfe2af562861b72f8bc47f8a5c08e010ac94eebbe33dc21d677cd5b", size = 1693359, upload-time = "2025-07-29T05:50:07.563Z" },
+ { url = "https://files.pythonhosted.org/packages/08/98/bee429b52233c4a391980a5b3b196b060872a13eadd41c3a34be9b1469ed/aiohttp-3.12.15-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fc49c4de44977aa8601a00edbf157e9a421f227aa7eb477d9e3df48343311065", size = 1716598, upload-time = "2025-07-29T05:50:09.33Z" },
+ { url = "https://files.pythonhosted.org/packages/57/39/b0314c1ea774df3392751b686104a3938c63ece2b7ce0ba1ed7c0b4a934f/aiohttp-3.12.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:2776c7ec89c54a47029940177e75c8c07c29c66f73464784971d6a81904ce9d1", size = 1644940, upload-time = "2025-07-29T05:50:11.334Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/83/3dacb8d3f8f512c8ca43e3fa8a68b20583bd25636ffa4e56ee841ffd79ae/aiohttp-3.12.15-cp310-cp310-win32.whl", hash = "sha256:2c7d81a277fa78b2203ab626ced1487420e8c11a8e373707ab72d189fcdad20a", size = 429239, upload-time = "2025-07-29T05:50:12.803Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/f9/470b5daba04d558c9673ca2034f28d067f3202a40e17804425f0c331c89f/aiohttp-3.12.15-cp310-cp310-win_amd64.whl", hash = "sha256:83603f881e11f0f710f8e2327817c82e79431ec976448839f3cd05d7afe8f830", size = 452297, upload-time = "2025-07-29T05:50:14.266Z" },
+ { url = "https://files.pythonhosted.org/packages/20/19/9e86722ec8e835959bd97ce8c1efa78cf361fa4531fca372551abcc9cdd6/aiohttp-3.12.15-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d3ce17ce0220383a0f9ea07175eeaa6aa13ae5a41f30bc61d84df17f0e9b1117", size = 711246, upload-time = "2025-07-29T05:50:15.937Z" },
+ { url = "https://files.pythonhosted.org/packages/71/f9/0a31fcb1a7d4629ac9d8f01f1cb9242e2f9943f47f5d03215af91c3c1a26/aiohttp-3.12.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:010cc9bbd06db80fe234d9003f67e97a10fe003bfbedb40da7d71c1008eda0fe", size = 483515, upload-time = "2025-07-29T05:50:17.442Z" },
+ { url = "https://files.pythonhosted.org/packages/62/6c/94846f576f1d11df0c2e41d3001000527c0fdf63fce7e69b3927a731325d/aiohttp-3.12.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f9d7c55b41ed687b9d7165b17672340187f87a773c98236c987f08c858145a9", size = 471776, upload-time = "2025-07-29T05:50:19.568Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/6c/f766d0aaafcee0447fad0328da780d344489c042e25cd58fde566bf40aed/aiohttp-3.12.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc4fbc61bb3548d3b482f9ac7ddd0f18c67e4225aaa4e8552b9f1ac7e6bda9e5", size = 1741977, upload-time = "2025-07-29T05:50:21.665Z" },
+ { url = "https://files.pythonhosted.org/packages/17/e5/fb779a05ba6ff44d7bc1e9d24c644e876bfff5abe5454f7b854cace1b9cc/aiohttp-3.12.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7fbc8a7c410bb3ad5d595bb7118147dfbb6449d862cc1125cf8867cb337e8728", size = 1690645, upload-time = "2025-07-29T05:50:23.333Z" },
+ { url = "https://files.pythonhosted.org/packages/37/4e/a22e799c2035f5d6a4ad2cf8e7c1d1bd0923192871dd6e367dafb158b14c/aiohttp-3.12.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:74dad41b3458dbb0511e760fb355bb0b6689e0630de8a22b1b62a98777136e16", size = 1789437, upload-time = "2025-07-29T05:50:25.007Z" },
+ { url = "https://files.pythonhosted.org/packages/28/e5/55a33b991f6433569babb56018b2fb8fb9146424f8b3a0c8ecca80556762/aiohttp-3.12.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b6f0af863cf17e6222b1735a756d664159e58855da99cfe965134a3ff63b0b0", size = 1828482, upload-time = "2025-07-29T05:50:26.693Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/82/1ddf0ea4f2f3afe79dffed5e8a246737cff6cbe781887a6a170299e33204/aiohttp-3.12.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5b7fe4972d48a4da367043b8e023fb70a04d1490aa7d68800e465d1b97e493b", size = 1730944, upload-time = "2025-07-29T05:50:28.382Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/96/784c785674117b4cb3877522a177ba1b5e4db9ce0fd519430b5de76eec90/aiohttp-3.12.15-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6443cca89553b7a5485331bc9bedb2342b08d073fa10b8c7d1c60579c4a7b9bd", size = 1668020, upload-time = "2025-07-29T05:50:30.032Z" },
+ { url = "https://files.pythonhosted.org/packages/12/8a/8b75f203ea7e5c21c0920d84dd24a5c0e971fe1e9b9ebbf29ae7e8e39790/aiohttp-3.12.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6c5f40ec615e5264f44b4282ee27628cea221fcad52f27405b80abb346d9f3f8", size = 1716292, upload-time = "2025-07-29T05:50:31.983Z" },
+ { url = "https://files.pythonhosted.org/packages/47/0b/a1451543475bb6b86a5cfc27861e52b14085ae232896a2654ff1231c0992/aiohttp-3.12.15-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:2abbb216a1d3a2fe86dbd2edce20cdc5e9ad0be6378455b05ec7f77361b3ab50", size = 1711451, upload-time = "2025-07-29T05:50:33.989Z" },
+ { url = "https://files.pythonhosted.org/packages/55/fd/793a23a197cc2f0d29188805cfc93aa613407f07e5f9da5cd1366afd9d7c/aiohttp-3.12.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:db71ce547012a5420a39c1b744d485cfb823564d01d5d20805977f5ea1345676", size = 1691634, upload-time = "2025-07-29T05:50:35.846Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/bf/23a335a6670b5f5dfc6d268328e55a22651b440fca341a64fccf1eada0c6/aiohttp-3.12.15-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:ced339d7c9b5030abad5854aa5413a77565e5b6e6248ff927d3e174baf3badf7", size = 1785238, upload-time = "2025-07-29T05:50:37.597Z" },
+ { url = "https://files.pythonhosted.org/packages/57/4f/ed60a591839a9d85d40694aba5cef86dde9ee51ce6cca0bb30d6eb1581e7/aiohttp-3.12.15-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:7c7dd29c7b5bda137464dc9bfc738d7ceea46ff70309859ffde8c022e9b08ba7", size = 1805701, upload-time = "2025-07-29T05:50:39.591Z" },
+ { url = "https://files.pythonhosted.org/packages/85/e0/444747a9455c5de188c0f4a0173ee701e2e325d4b2550e9af84abb20cdba/aiohttp-3.12.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:421da6fd326460517873274875c6c5a18ff225b40da2616083c5a34a7570b685", size = 1718758, upload-time = "2025-07-29T05:50:41.292Z" },
+ { url = "https://files.pythonhosted.org/packages/36/ab/1006278d1ffd13a698e5dd4bfa01e5878f6bddefc296c8b62649753ff249/aiohttp-3.12.15-cp311-cp311-win32.whl", hash = "sha256:4420cf9d179ec8dfe4be10e7d0fe47d6d606485512ea2265b0d8c5113372771b", size = 428868, upload-time = "2025-07-29T05:50:43.063Z" },
+ { url = "https://files.pythonhosted.org/packages/10/97/ad2b18700708452400278039272032170246a1bf8ec5d832772372c71f1a/aiohttp-3.12.15-cp311-cp311-win_amd64.whl", hash = "sha256:edd533a07da85baa4b423ee8839e3e91681c7bfa19b04260a469ee94b778bf6d", size = 453273, upload-time = "2025-07-29T05:50:44.613Z" },
+ { url = "https://files.pythonhosted.org/packages/63/97/77cb2450d9b35f517d6cf506256bf4f5bda3f93a66b4ad64ba7fc917899c/aiohttp-3.12.15-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:802d3868f5776e28f7bf69d349c26fc0efadb81676d0afa88ed00d98a26340b7", size = 702333, upload-time = "2025-07-29T05:50:46.507Z" },
+ { url = "https://files.pythonhosted.org/packages/83/6d/0544e6b08b748682c30b9f65640d006e51f90763b41d7c546693bc22900d/aiohttp-3.12.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2800614cd560287be05e33a679638e586a2d7401f4ddf99e304d98878c29444", size = 476948, upload-time = "2025-07-29T05:50:48.067Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/1d/c8c40e611e5094330284b1aea8a4b02ca0858f8458614fa35754cab42b9c/aiohttp-3.12.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8466151554b593909d30a0a125d638b4e5f3836e5aecde85b66b80ded1cb5b0d", size = 469787, upload-time = "2025-07-29T05:50:49.669Z" },
+ { url = "https://files.pythonhosted.org/packages/38/7d/b76438e70319796bfff717f325d97ce2e9310f752a267bfdf5192ac6082b/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e5a495cb1be69dae4b08f35a6c4579c539e9b5706f606632102c0f855bcba7c", size = 1716590, upload-time = "2025-07-29T05:50:51.368Z" },
+ { url = "https://files.pythonhosted.org/packages/79/b1/60370d70cdf8b269ee1444b390cbd72ce514f0d1cd1a715821c784d272c9/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6404dfc8cdde35c69aaa489bb3542fb86ef215fc70277c892be8af540e5e21c0", size = 1699241, upload-time = "2025-07-29T05:50:53.628Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/2b/4968a7b8792437ebc12186db31523f541943e99bda8f30335c482bea6879/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ead1c00f8521a5c9070fcb88f02967b1d8a0544e6d85c253f6968b785e1a2ab", size = 1754335, upload-time = "2025-07-29T05:50:55.394Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/c1/49524ed553f9a0bec1a11fac09e790f49ff669bcd14164f9fab608831c4d/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6990ef617f14450bc6b34941dba4f12d5613cbf4e33805932f853fbd1cf18bfb", size = 1800491, upload-time = "2025-07-29T05:50:57.202Z" },
+ { url = "https://files.pythonhosted.org/packages/de/5e/3bf5acea47a96a28c121b167f5ef659cf71208b19e52a88cdfa5c37f1fcc/aiohttp-3.12.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd736ed420f4db2b8148b52b46b88ed038d0354255f9a73196b7bbce3ea97545", size = 1719929, upload-time = "2025-07-29T05:50:59.192Z" },
+ { url = "https://files.pythonhosted.org/packages/39/94/8ae30b806835bcd1cba799ba35347dee6961a11bd507db634516210e91d8/aiohttp-3.12.15-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c5092ce14361a73086b90c6efb3948ffa5be2f5b6fbcf52e8d8c8b8848bb97c", size = 1635733, upload-time = "2025-07-29T05:51:01.394Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/46/06cdef71dd03acd9da7f51ab3a9107318aee12ad38d273f654e4f981583a/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:aaa2234bb60c4dbf82893e934d8ee8dea30446f0647e024074237a56a08c01bd", size = 1696790, upload-time = "2025-07-29T05:51:03.657Z" },
+ { url = "https://files.pythonhosted.org/packages/02/90/6b4cfaaf92ed98d0ec4d173e78b99b4b1a7551250be8937d9d67ecb356b4/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6d86a2fbdd14192e2f234a92d3b494dd4457e683ba07e5905a0b3ee25389ac9f", size = 1718245, upload-time = "2025-07-29T05:51:05.911Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/e6/2593751670fa06f080a846f37f112cbe6f873ba510d070136a6ed46117c6/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a041e7e2612041a6ddf1c6a33b883be6a421247c7afd47e885969ee4cc58bd8d", size = 1658899, upload-time = "2025-07-29T05:51:07.753Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/28/c15bacbdb8b8eb5bf39b10680d129ea7410b859e379b03190f02fa104ffd/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5015082477abeafad7203757ae44299a610e89ee82a1503e3d4184e6bafdd519", size = 1738459, upload-time = "2025-07-29T05:51:09.56Z" },
+ { url = "https://files.pythonhosted.org/packages/00/de/c269cbc4faa01fb10f143b1670633a8ddd5b2e1ffd0548f7aa49cb5c70e2/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:56822ff5ddfd1b745534e658faba944012346184fbfe732e0d6134b744516eea", size = 1766434, upload-time = "2025-07-29T05:51:11.423Z" },
+ { url = "https://files.pythonhosted.org/packages/52/b0/4ff3abd81aa7d929b27d2e1403722a65fc87b763e3a97b3a2a494bfc63bc/aiohttp-3.12.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b2acbbfff69019d9014508c4ba0401822e8bae5a5fdc3b6814285b71231b60f3", size = 1726045, upload-time = "2025-07-29T05:51:13.689Z" },
+ { url = "https://files.pythonhosted.org/packages/71/16/949225a6a2dd6efcbd855fbd90cf476052e648fb011aa538e3b15b89a57a/aiohttp-3.12.15-cp312-cp312-win32.whl", hash = "sha256:d849b0901b50f2185874b9a232f38e26b9b3d4810095a7572eacea939132d4e1", size = 423591, upload-time = "2025-07-29T05:51:15.452Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/d8/fa65d2a349fe938b76d309db1a56a75c4fb8cc7b17a398b698488a939903/aiohttp-3.12.15-cp312-cp312-win_amd64.whl", hash = "sha256:b390ef5f62bb508a9d67cb3bba9b8356e23b3996da7062f1a57ce1a79d2b3d34", size = 450266, upload-time = "2025-07-29T05:51:17.239Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/33/918091abcf102e39d15aba2476ad9e7bd35ddb190dcdd43a854000d3da0d/aiohttp-3.12.15-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:9f922ffd05034d439dde1c77a20461cf4a1b0831e6caa26151fe7aa8aaebc315", size = 696741, upload-time = "2025-07-29T05:51:19.021Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/2a/7495a81e39a998e400f3ecdd44a62107254803d1681d9189be5c2e4530cd/aiohttp-3.12.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2ee8a8ac39ce45f3e55663891d4b1d15598c157b4d494a4613e704c8b43112cd", size = 474407, upload-time = "2025-07-29T05:51:21.165Z" },
+ { url = "https://files.pythonhosted.org/packages/49/fc/a9576ab4be2dcbd0f73ee8675d16c707cfc12d5ee80ccf4015ba543480c9/aiohttp-3.12.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3eae49032c29d356b94eee45a3f39fdf4b0814b397638c2f718e96cfadf4c4e4", size = 466703, upload-time = "2025-07-29T05:51:22.948Z" },
+ { url = "https://files.pythonhosted.org/packages/09/2f/d4bcc8448cf536b2b54eed48f19682031ad182faa3a3fee54ebe5b156387/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b97752ff12cc12f46a9b20327104448042fce5c33a624f88c18f66f9368091c7", size = 1705532, upload-time = "2025-07-29T05:51:25.211Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/f3/59406396083f8b489261e3c011aa8aee9df360a96ac8fa5c2e7e1b8f0466/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:894261472691d6fe76ebb7fcf2e5870a2ac284c7406ddc95823c8598a1390f0d", size = 1686794, upload-time = "2025-07-29T05:51:27.145Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/71/164d194993a8d114ee5656c3b7ae9c12ceee7040d076bf7b32fb98a8c5c6/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5fa5d9eb82ce98959fc1031c28198b431b4d9396894f385cb63f1e2f3f20ca6b", size = 1738865, upload-time = "2025-07-29T05:51:29.366Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/00/d198461b699188a93ead39cb458554d9f0f69879b95078dce416d3209b54/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f0fa751efb11a541f57db59c1dd821bec09031e01452b2b6217319b3a1f34f3d", size = 1788238, upload-time = "2025-07-29T05:51:31.285Z" },
+ { url = "https://files.pythonhosted.org/packages/85/b8/9e7175e1fa0ac8e56baa83bf3c214823ce250d0028955dfb23f43d5e61fd/aiohttp-3.12.15-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5346b93e62ab51ee2a9d68e8f73c7cf96ffb73568a23e683f931e52450e4148d", size = 1710566, upload-time = "2025-07-29T05:51:33.219Z" },
+ { url = "https://files.pythonhosted.org/packages/59/e4/16a8eac9df39b48ae102ec030fa9f726d3570732e46ba0c592aeeb507b93/aiohttp-3.12.15-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:049ec0360f939cd164ecbfd2873eaa432613d5e77d6b04535e3d1fbae5a9e645", size = 1624270, upload-time = "2025-07-29T05:51:35.195Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/f8/cd84dee7b6ace0740908fd0af170f9fab50c2a41ccbc3806aabcb1050141/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b52dcf013b57464b6d1e51b627adfd69a8053e84b7103a7cd49c030f9ca44461", size = 1677294, upload-time = "2025-07-29T05:51:37.215Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/42/d0f1f85e50d401eccd12bf85c46ba84f947a84839c8a1c2c5f6e8ab1eb50/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9b2af240143dd2765e0fb661fd0361a1b469cab235039ea57663cda087250ea9", size = 1708958, upload-time = "2025-07-29T05:51:39.328Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/6b/f6fa6c5790fb602538483aa5a1b86fcbad66244997e5230d88f9412ef24c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ac77f709a2cde2cc71257ab2d8c74dd157c67a0558a0d2799d5d571b4c63d44d", size = 1651553, upload-time = "2025-07-29T05:51:41.356Z" },
+ { url = "https://files.pythonhosted.org/packages/04/36/a6d36ad545fa12e61d11d1932eef273928b0495e6a576eb2af04297fdd3c/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:47f6b962246f0a774fbd3b6b7be25d59b06fdb2f164cf2513097998fc6a29693", size = 1727688, upload-time = "2025-07-29T05:51:43.452Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/c8/f195e5e06608a97a4e52c5d41c7927301bf757a8e8bb5bbf8cef6c314961/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:760fb7db442f284996e39cf9915a94492e1896baac44f06ae551974907922b64", size = 1761157, upload-time = "2025-07-29T05:51:45.643Z" },
+ { url = "https://files.pythonhosted.org/packages/05/6a/ea199e61b67f25ba688d3ce93f63b49b0a4e3b3d380f03971b4646412fc6/aiohttp-3.12.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ad702e57dc385cae679c39d318def49aef754455f237499d5b99bea4ef582e51", size = 1710050, upload-time = "2025-07-29T05:51:48.203Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/2e/ffeb7f6256b33635c29dbed29a22a723ff2dd7401fff42ea60cf2060abfb/aiohttp-3.12.15-cp313-cp313-win32.whl", hash = "sha256:f813c3e9032331024de2eb2e32a88d86afb69291fbc37a3a3ae81cc9917fb3d0", size = 422647, upload-time = "2025-07-29T05:51:50.718Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/8e/78ee35774201f38d5e1ba079c9958f7629b1fd079459aea9467441dbfbf5/aiohttp-3.12.15-cp313-cp313-win_amd64.whl", hash = "sha256:1a649001580bdb37c6fdb1bebbd7e3bc688e8ec2b5c6f52edbb664662b17dc84", size = 449067, upload-time = "2025-07-29T05:51:52.549Z" },
+]
+
+[[package]]
+name = "aiosignal"
+version = "1.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "frozenlist" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" },
+]
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
+]
+
+[[package]]
+name = "anyio"
+version = "4.10.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "idna" },
+ { name = "sniffio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" },
+]
+
+[[package]]
+name = "async-timeout"
+version = "5.0.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a5/ae/136395dfbfe00dfc94da3f3e136d0b13f394cba8f4841120e34226265780/async_timeout-5.0.1.tar.gz", hash = "sha256:d9321a7a3d5a6a5e187e824d2fa0793ce379a202935782d555d6e9d2735677d3", size = 9274, upload-time = "2024-11-06T16:41:39.6Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = "sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233, upload-time = "2024-11-06T16:41:37.9Z" },
+]
+
+[[package]]
+name = "attrs"
+version = "25.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" },
+]
+
+[[package]]
+name = "authlib"
+version = "1.6.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cryptography" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8e/a1/d8d1c6f8bc922c0b87ae0d933a8ed57be1bef6970894ed79c2852a153cd3/authlib-1.6.1.tar.gz", hash = "sha256:4dffdbb1460ba6ec8c17981a4c67af7d8af131231b5a36a88a1e8c80c111cdfd", size = 159988, upload-time = "2025-07-20T07:38:42.834Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f9/58/cc6a08053f822f98f334d38a27687b69c6655fb05cd74a7a5e70a2aeed95/authlib-1.6.1-py2.py3-none-any.whl", hash = "sha256:e9d2031c34c6309373ab845afc24168fe9e93dc52d252631f52642f21f5ed06e", size = 239299, upload-time = "2025-07-20T07:38:39.259Z" },
+]
+
+[[package]]
+name = "babel"
+version = "2.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/7d/6b/d52e42361e1aa00709585ecc30b3f9684b3ab62530771402248b1b1d6240/babel-2.17.0.tar.gz", hash = "sha256:0c54cffb19f690cdcc52a3b50bcbf71e07a808d1c80d549f2459b9d2cf0afb9d", size = 9951852, upload-time = "2025-02-01T15:17:41.026Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b7/b8/3fe70c75fe32afc4bb507f75563d39bc5642255d1d94f1f23604725780bf/babel-2.17.0-py3-none-any.whl", hash = "sha256:4d0b53093fdfb4b21c92b5213dba5a1b23885afa8383709427046b21c366e5f2", size = 10182537, upload-time = "2025-02-01T15:17:37.39Z" },
+]
+
+[[package]]
+name = "backports-asyncio-runner"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8e/ff/70dca7d7cb1cbc0edb2c6cc0c38b65cba36cccc491eca64cabd5fe7f8670/backports_asyncio_runner-1.2.0.tar.gz", hash = "sha256:a5aa7b2b7d8f8bfcaa2b57313f70792df84e32a2a746f585213373f900b42162", size = 69893, upload-time = "2025-07-02T02:27:15.685Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a0/59/76ab57e3fe74484f48a53f8e337171b4a2349e506eabe136d7e01d059086/backports_asyncio_runner-1.2.0-py3-none-any.whl", hash = "sha256:0da0a936a8aeb554eccb426dc55af3ba63bcdc69fa1a600b5bb305413a4477b5", size = 12313, upload-time = "2025-07-02T02:27:14.263Z" },
+]
+
+[[package]]
+name = "backports-datetime-fromisoformat"
+version = "2.0.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/71/81/eff3184acb1d9dc3ce95a98b6f3c81a49b4be296e664db8e1c2eeabef3d9/backports_datetime_fromisoformat-2.0.3.tar.gz", hash = "sha256:b58edc8f517b66b397abc250ecc737969486703a66eb97e01e6d51291b1a139d", size = 23588, upload-time = "2024-12-28T20:18:15.017Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/42/4b/d6b051ca4b3d76f23c2c436a9669f3be616b8cf6461a7e8061c7c4269642/backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5f681f638f10588fa3c101ee9ae2b63d3734713202ddfcfb6ec6cea0778a29d4", size = 27561, upload-time = "2024-12-28T20:16:47.974Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/40/e39b0d471e55eb1b5c7c81edab605c02f71c786d59fb875f0a6f23318747/backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:cd681460e9142f1249408e5aee6d178c6d89b49e06d44913c8fdfb6defda8d1c", size = 34448, upload-time = "2024-12-28T20:16:50.712Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/28/7a5c87c5561d14f1c9af979231fdf85d8f9fad7a95ff94e56d2205e2520a/backports_datetime_fromisoformat-2.0.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:ee68bc8735ae5058695b76d3bb2aee1d137c052a11c8303f1e966aa23b72b65b", size = 27093, upload-time = "2024-12-28T20:16:52.994Z" },
+ { url = "https://files.pythonhosted.org/packages/80/ba/f00296c5c4536967c7d1136107fdb91c48404fe769a4a6fd5ab045629af8/backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8273fe7932db65d952a43e238318966eab9e49e8dd546550a41df12175cc2be4", size = 52836, upload-time = "2024-12-28T20:16:55.283Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/92/bb1da57a069ddd601aee352a87262c7ae93467e66721d5762f59df5021a6/backports_datetime_fromisoformat-2.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39d57ea50aa5a524bb239688adc1d1d824c31b6094ebd39aa164d6cadb85de22", size = 52798, upload-time = "2024-12-28T20:16:56.64Z" },
+ { url = "https://files.pythonhosted.org/packages/df/ef/b6cfd355982e817ccdb8d8d109f720cab6e06f900784b034b30efa8fa832/backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ac6272f87693e78209dc72e84cf9ab58052027733cd0721c55356d3c881791cf", size = 52891, upload-time = "2024-12-28T20:16:58.887Z" },
+ { url = "https://files.pythonhosted.org/packages/37/39/b13e3ae8a7c5d88b68a6e9248ffe7066534b0cfe504bf521963e61b6282d/backports_datetime_fromisoformat-2.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:44c497a71f80cd2bcfc26faae8857cf8e79388e3d5fbf79d2354b8c360547d58", size = 52955, upload-time = "2024-12-28T20:17:00.028Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/e4/70cffa3ce1eb4f2ff0c0d6f5d56285aacead6bd3879b27a2ba57ab261172/backports_datetime_fromisoformat-2.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:6335a4c9e8af329cb1ded5ab41a666e1448116161905a94e054f205aa6d263bc", size = 29323, upload-time = "2024-12-28T20:17:01.125Z" },
+ { url = "https://files.pythonhosted.org/packages/62/f5/5bc92030deadf34c365d908d4533709341fb05d0082db318774fdf1b2bcb/backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2e4b66e017253cdbe5a1de49e0eecff3f66cd72bcb1229d7db6e6b1832c0443", size = 27626, upload-time = "2024-12-28T20:17:03.448Z" },
+ { url = "https://files.pythonhosted.org/packages/28/45/5885737d51f81dfcd0911dd5c16b510b249d4c4cf6f4a991176e0358a42a/backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:43e2d648e150777e13bbc2549cc960373e37bf65bd8a5d2e0cef40e16e5d8dd0", size = 34588, upload-time = "2024-12-28T20:17:04.459Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/6d/bd74de70953f5dd3e768c8fc774af942af0ce9f211e7c38dd478fa7ea910/backports_datetime_fromisoformat-2.0.3-cp311-cp311-macosx_11_0_x86_64.whl", hash = "sha256:4ce6326fd86d5bae37813c7bf1543bae9e4c215ec6f5afe4c518be2635e2e005", size = 27162, upload-time = "2024-12-28T20:17:06.752Z" },
+ { url = "https://files.pythonhosted.org/packages/47/ba/1d14b097f13cce45b2b35db9898957578b7fcc984e79af3b35189e0d332f/backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7c8fac333bf860208fd522a5394369ee3c790d0aa4311f515fcc4b6c5ef8d75", size = 54482, upload-time = "2024-12-28T20:17:08.15Z" },
+ { url = "https://files.pythonhosted.org/packages/25/e9/a2a7927d053b6fa148b64b5e13ca741ca254c13edca99d8251e9a8a09cfe/backports_datetime_fromisoformat-2.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24a4da5ab3aa0cc293dc0662a0c6d1da1a011dc1edcbc3122a288cfed13a0b45", size = 54362, upload-time = "2024-12-28T20:17:10.605Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/99/394fb5e80131a7d58c49b89e78a61733a9994885804a0bb582416dd10c6f/backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:58ea11e3bf912bd0a36b0519eae2c5b560b3cb972ea756e66b73fb9be460af01", size = 54162, upload-time = "2024-12-28T20:17:12.301Z" },
+ { url = "https://files.pythonhosted.org/packages/88/25/1940369de573c752889646d70b3fe8645e77b9e17984e72a554b9b51ffc4/backports_datetime_fromisoformat-2.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8a375c7dbee4734318714a799b6c697223e4bbb57232af37fbfff88fb48a14c6", size = 54118, upload-time = "2024-12-28T20:17:13.609Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/46/f275bf6c61683414acaf42b2df7286d68cfef03e98b45c168323d7707778/backports_datetime_fromisoformat-2.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:ac677b1664c4585c2e014739f6678137c8336815406052349c85898206ec7061", size = 29329, upload-time = "2024-12-28T20:17:16.124Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/0f/69bbdde2e1e57c09b5f01788804c50e68b29890aada999f2b1a40519def9/backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:66ce47ee1ba91e146149cf40565c3d750ea1be94faf660ca733d8601e0848147", size = 27630, upload-time = "2024-12-28T20:17:19.442Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/1d/1c84a50c673c87518b1adfeafcfd149991ed1f7aedc45d6e5eac2f7d19d7/backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:8b7e069910a66b3bba61df35b5f879e5253ff0821a70375b9daf06444d046fa4", size = 34707, upload-time = "2024-12-28T20:17:21.79Z" },
+ { url = "https://files.pythonhosted.org/packages/71/44/27eae384e7e045cda83f70b551d04b4a0b294f9822d32dea1cbf1592de59/backports_datetime_fromisoformat-2.0.3-cp312-cp312-macosx_11_0_x86_64.whl", hash = "sha256:a3b5d1d04a9e0f7b15aa1e647c750631a873b298cdd1255687bb68779fe8eb35", size = 27280, upload-time = "2024-12-28T20:17:24.503Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/7a/a4075187eb6bbb1ff6beb7229db5f66d1070e6968abeb61e056fa51afa5e/backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1b95986430e789c076610aea704db20874f0781b8624f648ca9fb6ef67c6e1", size = 55094, upload-time = "2024-12-28T20:17:25.546Z" },
+ { url = "https://files.pythonhosted.org/packages/71/03/3fced4230c10af14aacadc195fe58e2ced91d011217b450c2e16a09a98c8/backports_datetime_fromisoformat-2.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffe5f793db59e2f1d45ec35a1cf51404fdd69df9f6952a0c87c3060af4c00e32", size = 55605, upload-time = "2024-12-28T20:17:29.208Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/0a/4b34a838c57bd16d3e5861ab963845e73a1041034651f7459e9935289cfd/backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:620e8e73bd2595dfff1b4d256a12b67fce90ece3de87b38e1dde46b910f46f4d", size = 55353, upload-time = "2024-12-28T20:17:32.433Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/68/07d13c6e98e1cad85606a876367ede2de46af859833a1da12c413c201d78/backports_datetime_fromisoformat-2.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4cf9c0a985d68476c1cabd6385c691201dda2337d7453fb4da9679ce9f23f4e7", size = 55298, upload-time = "2024-12-28T20:17:34.919Z" },
+ { url = "https://files.pythonhosted.org/packages/60/33/45b4d5311f42360f9b900dea53ab2bb20a3d61d7f9b7c37ddfcb3962f86f/backports_datetime_fromisoformat-2.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:d144868a73002e6e2e6fef72333e7b0129cecdd121aa8f1edba7107fd067255d", size = 29375, upload-time = "2024-12-28T20:17:36.018Z" },
+ { url = "https://files.pythonhosted.org/packages/be/03/7eaa9f9bf290395d57fd30d7f1f2f9dff60c06a31c237dc2beb477e8f899/backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90e202e72a3d5aae673fcc8c9a4267d56b2f532beeb9173361293625fe4d2039", size = 28980, upload-time = "2024-12-28T20:18:06.554Z" },
+ { url = "https://files.pythonhosted.org/packages/47/80/a0ecf33446c7349e79f54cc532933780341d20cff0ee12b5bfdcaa47067e/backports_datetime_fromisoformat-2.0.3-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2df98ef1b76f5a58bb493dda552259ba60c3a37557d848e039524203951c9f06", size = 28449, upload-time = "2024-12-28T20:18:07.77Z" },
+]
+
+[[package]]
+name = "backrefs"
+version = "5.9"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/eb/a7/312f673df6a79003279e1f55619abbe7daebbb87c17c976ddc0345c04c7b/backrefs-5.9.tar.gz", hash = "sha256:808548cb708d66b82ee231f962cb36faaf4f2baab032f2fbb783e9c2fdddaa59", size = 5765857, upload-time = "2025-06-22T19:34:13.97Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/19/4d/798dc1f30468134906575156c089c492cf79b5a5fd373f07fe26c4d046bf/backrefs-5.9-py310-none-any.whl", hash = "sha256:db8e8ba0e9de81fcd635f440deab5ae5f2591b54ac1ebe0550a2ca063488cd9f", size = 380267, upload-time = "2025-06-22T19:34:05.252Z" },
+ { url = "https://files.pythonhosted.org/packages/55/07/f0b3375bf0d06014e9787797e6b7cc02b38ac9ff9726ccfe834d94e9991e/backrefs-5.9-py311-none-any.whl", hash = "sha256:6907635edebbe9b2dc3de3a2befff44d74f30a4562adbb8b36f21252ea19c5cf", size = 392072, upload-time = "2025-06-22T19:34:06.743Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/12/4f345407259dd60a0997107758ba3f221cf89a9b5a0f8ed5b961aef97253/backrefs-5.9-py312-none-any.whl", hash = "sha256:7fdf9771f63e6028d7fee7e0c497c81abda597ea45d6b8f89e8ad76994f5befa", size = 397947, upload-time = "2025-06-22T19:34:08.172Z" },
+ { url = "https://files.pythonhosted.org/packages/10/bf/fa31834dc27a7f05e5290eae47c82690edc3a7b37d58f7fb35a1bdbf355b/backrefs-5.9-py313-none-any.whl", hash = "sha256:cc37b19fa219e93ff825ed1fed8879e47b4d89aa7a1884860e2db64ccd7c676b", size = 399843, upload-time = "2025-06-22T19:34:09.68Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/24/b29af34b2c9c41645a9f4ff117bae860291780d73880f449e0b5d948c070/backrefs-5.9-py314-none-any.whl", hash = "sha256:df5e169836cc8acb5e440ebae9aad4bf9d15e226d3bad049cf3f6a5c20cc8dc9", size = 411762, upload-time = "2025-06-22T19:34:11.037Z" },
+ { url = "https://files.pythonhosted.org/packages/41/ff/392bff89415399a979be4a65357a41d92729ae8580a66073d8ec8d810f98/backrefs-5.9-py39-none-any.whl", hash = "sha256:f48ee18f6252b8f5777a22a00a09a85de0ca931658f1dd96d4406a34f3748c60", size = 380265, upload-time = "2025-06-22T19:34:12.405Z" },
+]
+
+[[package]]
+name = "bandit"
+version = "1.8.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+ { name = "pyyaml" },
+ { name = "rich" },
+ { name = "stevedore" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fb/b5/7eb834e213d6f73aace21938e5e90425c92e5f42abafaf8a6d5d21beed51/bandit-1.8.6.tar.gz", hash = "sha256:dbfe9c25fc6961c2078593de55fd19f2559f9e45b99f1272341f5b95dea4e56b", size = 4240271, upload-time = "2025-07-06T03:10:50.9Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/48/ca/ba5f909b40ea12ec542d5d7bdd13ee31c4d65f3beed20211ef81c18fa1f3/bandit-1.8.6-py3-none-any.whl", hash = "sha256:3348e934d736fcdb68b6aa4030487097e23a501adf3e7827b63658df464dddd0", size = 133808, upload-time = "2025-07-06T03:10:49.134Z" },
+]
+
+[package.optional-dependencies]
+toml = [
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+
+[[package]]
+name = "black"
+version = "25.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "mypy-extensions" },
+ { name = "packaging" },
+ { name = "pathspec" },
+ { name = "platformdirs" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/94/49/26a7b0f3f35da4b5a65f081943b7bcd22d7002f5f0fb8098ec1ff21cb6ef/black-25.1.0.tar.gz", hash = "sha256:33496d5cd1222ad73391352b4ae8da15253c5de89b93a80b3e2c8d9a19ec2666", size = 649449, upload-time = "2025-01-29T04:15:40.373Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/3b/4ba3f93ac8d90410423fdd31d7541ada9bcee1df32fb90d26de41ed40e1d/black-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:759e7ec1e050a15f89b770cefbf91ebee8917aac5c20483bc2d80a6c3a04df32", size = 1629419, upload-time = "2025-01-29T05:37:06.642Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/02/0bde0485146a8a5e694daed47561785e8b77a0466ccc1f3e485d5ef2925e/black-25.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e519ecf93120f34243e6b0054db49c00a35f84f195d5bce7e9f5cfc578fc2da", size = 1461080, upload-time = "2025-01-29T05:37:09.321Z" },
+ { url = "https://files.pythonhosted.org/packages/52/0e/abdf75183c830eaca7589144ff96d49bce73d7ec6ad12ef62185cc0f79a2/black-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:055e59b198df7ac0b7efca5ad7ff2516bca343276c466be72eb04a3bcc1f82d7", size = 1766886, upload-time = "2025-01-29T04:18:24.432Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/a6/97d8bb65b1d8a41f8a6736222ba0a334db7b7b77b8023ab4568288f23973/black-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:db8ea9917d6f8fc62abd90d944920d95e73c83a5ee3383493e35d271aca872e9", size = 1419404, upload-time = "2025-01-29T04:19:04.296Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/4f/87f596aca05c3ce5b94b8663dbfe242a12843caaa82dd3f85f1ffdc3f177/black-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a39337598244de4bae26475f77dda852ea00a93bd4c728e09eacd827ec929df0", size = 1614372, upload-time = "2025-01-29T05:37:11.71Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/d0/2c34c36190b741c59c901e56ab7f6e54dad8df05a6272a9747ecef7c6036/black-25.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96c1c7cd856bba8e20094e36e0f948718dc688dba4a9d78c3adde52b9e6c2299", size = 1442865, upload-time = "2025-01-29T05:37:14.309Z" },
+ { url = "https://files.pythonhosted.org/packages/21/d4/7518c72262468430ead45cf22bd86c883a6448b9eb43672765d69a8f1248/black-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bce2e264d59c91e52d8000d507eb20a9aca4a778731a08cfff7e5ac4a4bb7096", size = 1749699, upload-time = "2025-01-29T04:18:17.688Z" },
+ { url = "https://files.pythonhosted.org/packages/58/db/4f5beb989b547f79096e035c4981ceb36ac2b552d0ac5f2620e941501c99/black-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:172b1dbff09f86ce6f4eb8edf9dede08b1fce58ba194c87d7a4f1a5aa2f5b3c2", size = 1428028, upload-time = "2025-01-29T04:18:51.711Z" },
+ { url = "https://files.pythonhosted.org/packages/83/71/3fe4741df7adf015ad8dfa082dd36c94ca86bb21f25608eb247b4afb15b2/black-25.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4b60580e829091e6f9238c848ea6750efed72140b91b048770b64e74fe04908b", size = 1650988, upload-time = "2025-01-29T05:37:16.707Z" },
+ { url = "https://files.pythonhosted.org/packages/13/f3/89aac8a83d73937ccd39bbe8fc6ac8860c11cfa0af5b1c96d081facac844/black-25.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e2978f6df243b155ef5fa7e558a43037c3079093ed5d10fd84c43900f2d8ecc", size = 1453985, upload-time = "2025-01-29T05:37:18.273Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/22/b99efca33f1f3a1d2552c714b1e1b5ae92efac6c43e790ad539a163d1754/black-25.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b48735872ec535027d979e8dcb20bf4f70b5ac75a8ea99f127c106a7d7aba9f", size = 1783816, upload-time = "2025-01-29T04:18:33.823Z" },
+ { url = "https://files.pythonhosted.org/packages/18/7e/a27c3ad3822b6f2e0e00d63d58ff6299a99a5b3aee69fa77cd4b0076b261/black-25.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea0213189960bda9cf99be5b8c8ce66bb054af5e9e861249cd23471bd7b0b3ba", size = 1440860, upload-time = "2025-01-29T04:19:12.944Z" },
+ { url = "https://files.pythonhosted.org/packages/98/87/0edf98916640efa5d0696e1abb0a8357b52e69e82322628f25bf14d263d1/black-25.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8f0b18a02996a836cc9c9c78e5babec10930862827b1b724ddfe98ccf2f2fe4f", size = 1650673, upload-time = "2025-01-29T05:37:20.574Z" },
+ { url = "https://files.pythonhosted.org/packages/52/e5/f7bf17207cf87fa6e9b676576749c6b6ed0d70f179a3d812c997870291c3/black-25.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afebb7098bfbc70037a053b91ae8437c3857482d3a690fefc03e9ff7aa9a5fd3", size = 1453190, upload-time = "2025-01-29T05:37:22.106Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/ee/adda3d46d4a9120772fae6de454c8495603c37c4c3b9c60f25b1ab6401fe/black-25.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:030b9759066a4ee5e5aca28c3c77f9c64789cdd4de8ac1df642c40b708be6171", size = 1782926, upload-time = "2025-01-29T04:18:58.564Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/64/94eb5f45dcb997d2082f097a3944cfc7fe87e071907f677e80788a2d7b7a/black-25.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:a22f402b410566e2d1c950708c77ebf5ebd5d0d88a6a2e87c86d9fb48afa0d18", size = 1442613, upload-time = "2025-01-29T04:19:27.63Z" },
+ { url = "https://files.pythonhosted.org/packages/09/71/54e999902aed72baf26bca0d50781b01838251a462612966e9fc4891eadd/black-25.1.0-py3-none-any.whl", hash = "sha256:95e8176dae143ba9097f351d174fdaf0ccd29efb414b362ae3fd72bf0f710717", size = 207646, upload-time = "2025-01-29T04:15:38.082Z" },
+]
+
+[[package]]
+name = "cachetools"
+version = "6.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8a/89/817ad5d0411f136c484d535952aef74af9b25e0d99e90cdffbe121e6d628/cachetools-6.1.0.tar.gz", hash = "sha256:b4c4f404392848db3ce7aac34950d17be4d864da4b8b66911008e430bc544587", size = 30714, upload-time = "2025-06-16T18:51:03.07Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/00/f0/2ef431fe4141f5e334759d73e81120492b23b2824336883a91ac04ba710b/cachetools-6.1.0-py3-none-any.whl", hash = "sha256:1c7bb3cf9193deaf3508b7c5f2a79986c13ea38965c5adcff1f84519cf39163e", size = 11189, upload-time = "2025-06-16T18:51:01.514Z" },
+]
+
+[[package]]
+name = "certifi"
+version = "2025.8.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload-time = "2025-08-03T03:07:47.08Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload-time = "2025-08-03T03:07:45.777Z" },
+]
+
+[[package]]
+name = "cffi"
+version = "1.17.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pycparser" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/90/07/f44ca684db4e4f08a3fdc6eeb9a0d15dc6883efc7b8c90357fdbf74e186c/cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", size = 182191, upload-time = "2024-09-04T20:43:30.027Z" },
+ { url = "https://files.pythonhosted.org/packages/08/fd/cc2fedbd887223f9f5d170c96e57cbf655df9831a6546c1727ae13fa977a/cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", size = 178592, upload-time = "2024-09-04T20:43:32.108Z" },
+ { url = "https://files.pythonhosted.org/packages/de/cc/4635c320081c78d6ffc2cab0a76025b691a91204f4aa317d568ff9280a2d/cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", size = 426024, upload-time = "2024-09-04T20:43:34.186Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/7b/3b2b250f3aab91abe5f8a51ada1b717935fdaec53f790ad4100fe2ec64d1/cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", size = 448188, upload-time = "2024-09-04T20:43:36.286Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/48/1b9283ebbf0ec065148d8de05d647a986c5f22586b18120020452fff8f5d/cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", size = 455571, upload-time = "2024-09-04T20:43:38.586Z" },
+ { url = "https://files.pythonhosted.org/packages/40/87/3b8452525437b40f39ca7ff70276679772ee7e8b394934ff60e63b7b090c/cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", size = 436687, upload-time = "2024-09-04T20:43:40.084Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/fb/4da72871d177d63649ac449aec2e8a29efe0274035880c7af59101ca2232/cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", size = 446211, upload-time = "2024-09-04T20:43:41.526Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/a0/62f00bcb411332106c02b663b26f3545a9ef136f80d5df746c05878f8c4b/cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", size = 461325, upload-time = "2024-09-04T20:43:43.117Z" },
+ { url = "https://files.pythonhosted.org/packages/36/83/76127035ed2e7e27b0787604d99da630ac3123bfb02d8e80c633f218a11d/cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", size = 438784, upload-time = "2024-09-04T20:43:45.256Z" },
+ { url = "https://files.pythonhosted.org/packages/21/81/a6cd025db2f08ac88b901b745c163d884641909641f9b826e8cb87645942/cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", size = 461564, upload-time = "2024-09-04T20:43:46.779Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/fe/4d41c2f200c4a457933dbd98d3cf4e911870877bd94d9656cc0fcb390681/cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", size = 171804, upload-time = "2024-09-04T20:43:48.186Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/b6/0b0f5ab93b0df4acc49cae758c81fe4e5ef26c3ae2e10cc69249dfd8b3ab/cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", size = 181299, upload-time = "2024-09-04T20:43:49.812Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/f4/927e3a8899e52a27fa57a48607ff7dc91a9ebe97399b357b85a0c7892e00/cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", size = 182264, upload-time = "2024-09-04T20:43:51.124Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/f5/6c3a8efe5f503175aaddcbea6ad0d2c96dad6f5abb205750d1b3df44ef29/cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", size = 178651, upload-time = "2024-09-04T20:43:52.872Z" },
+ { url = "https://files.pythonhosted.org/packages/94/dd/a3f0118e688d1b1a57553da23b16bdade96d2f9bcda4d32e7d2838047ff7/cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", size = 445259, upload-time = "2024-09-04T20:43:56.123Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/ea/70ce63780f096e16ce8588efe039d3c4f91deb1dc01e9c73a287939c79a6/cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", size = 469200, upload-time = "2024-09-04T20:43:57.891Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/a0/a4fa9f4f781bda074c3ddd57a572b060fa0df7655d2a4247bbe277200146/cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", size = 477235, upload-time = "2024-09-04T20:44:00.18Z" },
+ { url = "https://files.pythonhosted.org/packages/62/12/ce8710b5b8affbcdd5c6e367217c242524ad17a02fe5beec3ee339f69f85/cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", size = 459721, upload-time = "2024-09-04T20:44:01.585Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/6b/d45873c5e0242196f042d555526f92aa9e0c32355a1be1ff8c27f077fd37/cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", size = 467242, upload-time = "2024-09-04T20:44:03.467Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/52/d9a0e523a572fbccf2955f5abe883cfa8bcc570d7faeee06336fbd50c9fc/cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", size = 477999, upload-time = "2024-09-04T20:44:05.023Z" },
+ { url = "https://files.pythonhosted.org/packages/44/74/f2a2460684a1a2d00ca799ad880d54652841a780c4c97b87754f660c7603/cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", size = 454242, upload-time = "2024-09-04T20:44:06.444Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/4a/34599cac7dfcd888ff54e801afe06a19c17787dfd94495ab0c8d35fe99fb/cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b", size = 478604, upload-time = "2024-09-04T20:44:08.206Z" },
+ { url = "https://files.pythonhosted.org/packages/34/33/e1b8a1ba29025adbdcda5fb3a36f94c03d771c1b7b12f726ff7fef2ebe36/cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", size = 171727, upload-time = "2024-09-04T20:44:09.481Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/97/50228be003bb2802627d28ec0627837ac0bf35c90cf769812056f235b2d1/cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", size = 181400, upload-time = "2024-09-04T20:44:10.873Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" },
+ { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" },
+ { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" },
+ { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" },
+ { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" },
+ { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" },
+ { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" },
+ { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" },
+]
+
+[[package]]
+name = "cfgv"
+version = "3.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/11/74/539e56497d9bd1d484fd863dd69cbbfa653cd2aa27abfe35653494d85e94/cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560", size = 7114, upload-time = "2023-08-12T20:38:17.776Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c5/55/51844dd50c4fc7a33b653bfaba4c2456f06955289ca770a5dbd5fd267374/cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9", size = 7249, upload-time = "2023-08-12T20:38:16.269Z" },
+]
+
+[[package]]
+name = "chardet"
+version = "5.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload-time = "2025-08-09T07:57:28.46Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d6/98/f3b8013223728a99b908c9344da3aa04ee6e3fa235f19409033eda92fb78/charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72", size = 207695, upload-time = "2025-08-09T07:55:36.452Z" },
+ { url = "https://files.pythonhosted.org/packages/21/40/5188be1e3118c82dcb7c2a5ba101b783822cfb413a0268ed3be0468532de/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe", size = 147153, upload-time = "2025-08-09T07:55:38.467Z" },
+ { url = "https://files.pythonhosted.org/packages/37/60/5d0d74bc1e1380f0b72c327948d9c2aca14b46a9efd87604e724260f384c/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601", size = 160428, upload-time = "2025-08-09T07:55:40.072Z" },
+ { url = "https://files.pythonhosted.org/packages/85/9a/d891f63722d9158688de58d050c59dc3da560ea7f04f4c53e769de5140f5/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c", size = 157627, upload-time = "2025-08-09T07:55:41.706Z" },
+ { url = "https://files.pythonhosted.org/packages/65/1a/7425c952944a6521a9cfa7e675343f83fd82085b8af2b1373a2409c683dc/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2", size = 152388, upload-time = "2025-08-09T07:55:43.262Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/c9/a2c9c2a355a8594ce2446085e2ec97fd44d323c684ff32042e2a6b718e1d/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0", size = 150077, upload-time = "2025-08-09T07:55:44.903Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/38/20a1f44e4851aa1c9105d6e7110c9d020e093dfa5836d712a5f074a12bf7/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0", size = 161631, upload-time = "2025-08-09T07:55:46.346Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/fa/384d2c0f57edad03d7bec3ebefb462090d8905b4ff5a2d2525f3bb711fac/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0", size = 159210, upload-time = "2025-08-09T07:55:47.539Z" },
+ { url = "https://files.pythonhosted.org/packages/33/9e/eca49d35867ca2db336b6ca27617deed4653b97ebf45dfc21311ce473c37/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a", size = 153739, upload-time = "2025-08-09T07:55:48.744Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/91/26c3036e62dfe8de8061182d33be5025e2424002125c9500faff74a6735e/charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f", size = 99825, upload-time = "2025-08-09T07:55:50.305Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/c6/f05db471f81af1fa01839d44ae2a8bfeec8d2a8b4590f16c4e7393afd323/charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669", size = 107452, upload-time = "2025-08-09T07:55:51.461Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/b5/991245018615474a60965a7c9cd2b4efbaabd16d582a5547c47ee1c7730b/charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b", size = 204483, upload-time = "2025-08-09T07:55:53.12Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2a/ae245c41c06299ec18262825c1569c5d3298fc920e4ddf56ab011b417efd/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64", size = 145520, upload-time = "2025-08-09T07:55:54.712Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/a4/b3b6c76e7a635748c4421d2b92c7b8f90a432f98bda5082049af37ffc8e3/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91", size = 158876, upload-time = "2025-08-09T07:55:56.024Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/e6/63bb0e10f90a8243c5def74b5b105b3bbbfb3e7bb753915fe333fb0c11ea/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f", size = 156083, upload-time = "2025-08-09T07:55:57.582Z" },
+ { url = "https://files.pythonhosted.org/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07", size = 150295, upload-time = "2025-08-09T07:55:59.147Z" },
+ { url = "https://files.pythonhosted.org/packages/61/f1/190d9977e0084d3f1dc169acd060d479bbbc71b90bf3e7bf7b9927dec3eb/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30", size = 148379, upload-time = "2025-08-09T07:56:00.364Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/92/27dbe365d34c68cfe0ca76f1edd70e8705d82b378cb54ebbaeabc2e3029d/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14", size = 160018, upload-time = "2025-08-09T07:56:01.678Z" },
+ { url = "https://files.pythonhosted.org/packages/99/04/baae2a1ea1893a01635d475b9261c889a18fd48393634b6270827869fa34/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c", size = 157430, upload-time = "2025-08-09T07:56:02.87Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/36/77da9c6a328c54d17b960c89eccacfab8271fdaaa228305330915b88afa9/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae", size = 151600, upload-time = "2025-08-09T07:56:04.089Z" },
+ { url = "https://files.pythonhosted.org/packages/64/d4/9eb4ff2c167edbbf08cdd28e19078bf195762e9bd63371689cab5ecd3d0d/charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849", size = 99616, upload-time = "2025-08-09T07:56:05.658Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/9c/996a4a028222e7761a96634d1820de8a744ff4327a00ada9c8942033089b/charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c", size = 107108, upload-time = "2025-08-09T07:56:07.176Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload-time = "2025-08-09T07:56:08.475Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload-time = "2025-08-09T07:56:09.708Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload-time = "2025-08-09T07:56:11.326Z" },
+ { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload-time = "2025-08-09T07:56:13.014Z" },
+ { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload-time = "2025-08-09T07:56:14.428Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload-time = "2025-08-09T07:56:16.051Z" },
+ { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload-time = "2025-08-09T07:56:17.314Z" },
+ { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload-time = "2025-08-09T07:56:18.641Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload-time = "2025-08-09T07:56:20.289Z" },
+ { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload-time = "2025-08-09T07:56:21.551Z" },
+ { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload-time = "2025-08-09T07:56:23.115Z" },
+ { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload-time = "2025-08-09T07:56:24.721Z" },
+ { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload-time = "2025-08-09T07:56:26.004Z" },
+ { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload-time = "2025-08-09T07:56:27.25Z" },
+ { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload-time = "2025-08-09T07:56:28.515Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload-time = "2025-08-09T07:56:29.716Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload-time = "2025-08-09T07:56:30.984Z" },
+ { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload-time = "2025-08-09T07:56:32.252Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload-time = "2025-08-09T07:56:33.481Z" },
+ { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload-time = "2025-08-09T07:56:34.739Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload-time = "2025-08-09T07:56:35.981Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload-time = "2025-08-09T07:56:37.339Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload-time = "2025-08-09T07:56:38.687Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload-time = "2025-08-09T07:56:40.048Z" },
+ { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload-time = "2025-08-09T07:56:41.311Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload-time = "2025-08-09T07:56:43.195Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload-time = "2025-08-09T07:56:44.819Z" },
+ { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload-time = "2025-08-09T07:56:46.684Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload-time = "2025-08-09T07:56:47.941Z" },
+ { url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload-time = "2025-08-09T07:56:49.756Z" },
+ { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload-time = "2025-08-09T07:56:51.369Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload-time = "2025-08-09T07:56:52.722Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload-time = "2025-08-09T07:56:55.172Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload-time = "2025-08-09T07:57:26.864Z" },
+]
+
+[[package]]
+name = "click"
+version = "8.2.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
+]
+
+[[package]]
+name = "coloredlogs"
+version = "15.0.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "humanfriendly" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/cc/c7/eed8f27100517e8c0e6b923d5f0845d0cb99763da6fdee00478f91db7325/coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0", size = 278520, upload-time = "2021-06-11T10:22:45.202Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a7/06/3d6badcf13db419e25b07041d9c7b4a2c331d3f4e7134445ec5df57714cd/coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934", size = 46018, upload-time = "2021-06-11T10:22:42.561Z" },
+]
+
+[[package]]
+name = "coverage"
+version = "7.10.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f4/2c/253cc41cd0f40b84c1c34c5363e0407d73d4a1cae005fed6db3b823175bd/coverage-7.10.3.tar.gz", hash = "sha256:812ba9250532e4a823b070b0420a36499859542335af3dca8f47fc6aa1a05619", size = 822936, upload-time = "2025-08-10T21:27:39.968Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2f/44/e14576c34b37764c821866909788ff7463228907ab82bae188dab2b421f1/coverage-7.10.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:53808194afdf948c462215e9403cca27a81cf150d2f9b386aee4dab614ae2ffe", size = 215964, upload-time = "2025-08-10T21:25:22.828Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/15/f4f92d9b83100903efe06c9396ee8d8bdba133399d37c186fc5b16d03a87/coverage-7.10.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f4d1b837d1abf72187a61645dbf799e0d7705aa9232924946e1f57eb09a3bf00", size = 216361, upload-time = "2025-08-10T21:25:25.603Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/3a/c92e8cd5e89acc41cfc026dfb7acedf89661ce2ea1ee0ee13aacb6b2c20c/coverage-7.10.3-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2a90dd4505d3cc68b847ab10c5ee81822a968b5191664e8a0801778fa60459fa", size = 243115, upload-time = "2025-08-10T21:25:27.09Z" },
+ { url = "https://files.pythonhosted.org/packages/23/53/c1d8c2778823b1d95ca81701bb8f42c87dc341a2f170acdf716567523490/coverage-7.10.3-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d52989685ff5bf909c430e6d7f6550937bc6d6f3e6ecb303c97a86100efd4596", size = 244927, upload-time = "2025-08-10T21:25:28.77Z" },
+ { url = "https://files.pythonhosted.org/packages/79/41/1e115fd809031f432b4ff8e2ca19999fb6196ab95c35ae7ad5e07c001130/coverage-7.10.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdb558a1d97345bde3a9f4d3e8d11c9e5611f748646e9bb61d7d612a796671b5", size = 246784, upload-time = "2025-08-10T21:25:30.195Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/b2/0eba9bdf8f1b327ae2713c74d4b7aa85451bb70622ab4e7b8c000936677c/coverage-7.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c9e6331a8f09cb1fc8bda032752af03c366870b48cce908875ba2620d20d0ad4", size = 244828, upload-time = "2025-08-10T21:25:31.785Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/cc/74c56b6bf71f2a53b9aa3df8bc27163994e0861c065b4fe3a8ac290bed35/coverage-7.10.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:992f48bf35b720e174e7fae916d943599f1a66501a2710d06c5f8104e0756ee1", size = 242844, upload-time = "2025-08-10T21:25:33.37Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/7b/ac183fbe19ac5596c223cb47af5737f4437e7566100b7e46cc29b66695a5/coverage-7.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c5595fc4ad6a39312c786ec3326d7322d0cf10e3ac6a6df70809910026d67cfb", size = 243721, upload-time = "2025-08-10T21:25:34.939Z" },
+ { url = "https://files.pythonhosted.org/packages/57/96/cb90da3b5a885af48f531905234a1e7376acfc1334242183d23154a1c285/coverage-7.10.3-cp310-cp310-win32.whl", hash = "sha256:9e92fa1f2bd5a57df9d00cf9ce1eb4ef6fccca4ceabec1c984837de55329db34", size = 218481, upload-time = "2025-08-10T21:25:36.935Z" },
+ { url = "https://files.pythonhosted.org/packages/15/67/1ba4c7d75745c4819c54a85766e0a88cc2bff79e1760c8a2debc34106dc2/coverage-7.10.3-cp310-cp310-win_amd64.whl", hash = "sha256:b96524d6e4a3ce6a75c56bb15dbd08023b0ae2289c254e15b9fbdddf0c577416", size = 219382, upload-time = "2025-08-10T21:25:38.267Z" },
+ { url = "https://files.pythonhosted.org/packages/87/04/810e506d7a19889c244d35199cbf3239a2f952b55580aa42ca4287409424/coverage-7.10.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2ff2e2afdf0d51b9b8301e542d9c21a8d084fd23d4c8ea2b3a1b3c96f5f7397", size = 216075, upload-time = "2025-08-10T21:25:39.891Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/50/6b3fbab034717b4af3060bdaea6b13dfdc6b1fad44b5082e2a95cd378a9a/coverage-7.10.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:18ecc5d1b9a8c570f6c9b808fa9a2b16836b3dd5414a6d467ae942208b095f85", size = 216476, upload-time = "2025-08-10T21:25:41.137Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/96/4368c624c1ed92659812b63afc76c492be7867ac8e64b7190b88bb26d43c/coverage-7.10.3-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1af4461b25fe92889590d438905e1fc79a95680ec2a1ff69a591bb3fdb6c7157", size = 246865, upload-time = "2025-08-10T21:25:42.408Z" },
+ { url = "https://files.pythonhosted.org/packages/34/12/5608f76070939395c17053bf16e81fd6c06cf362a537ea9d07e281013a27/coverage-7.10.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3966bc9a76b09a40dc6063c8b10375e827ea5dfcaffae402dd65953bef4cba54", size = 248800, upload-time = "2025-08-10T21:25:44.098Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/52/7cc90c448a0ad724283cbcdfd66b8d23a598861a6a22ac2b7b8696491798/coverage-7.10.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:205a95b87ef4eb303b7bc5118b47b6b6604a644bcbdb33c336a41cfc0a08c06a", size = 250904, upload-time = "2025-08-10T21:25:45.384Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/70/9967b847063c1c393b4f4d6daab1131558ebb6b51f01e7df7150aa99f11d/coverage-7.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5b3801b79fb2ad61e3c7e2554bab754fc5f105626056980a2b9cf3aef4f13f84", size = 248597, upload-time = "2025-08-10T21:25:47.059Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/fe/263307ce6878b9ed4865af42e784b42bb82d066bcf10f68defa42931c2c7/coverage-7.10.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0dc69c60224cda33d384572da945759756e3f06b9cdac27f302f53961e63160", size = 246647, upload-time = "2025-08-10T21:25:48.334Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/27/d27af83ad162eba62c4eb7844a1de6cf7d9f6b185df50b0a3514a6f80ddd/coverage-7.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a83d4f134bab2c7ff758e6bb1541dd72b54ba295ced6a63d93efc2e20cb9b124", size = 247290, upload-time = "2025-08-10T21:25:49.945Z" },
+ { url = "https://files.pythonhosted.org/packages/28/83/904ff27e15467a5622dbe9ad2ed5831b4a616a62570ec5924d06477dff5a/coverage-7.10.3-cp311-cp311-win32.whl", hash = "sha256:54e409dd64e5302b2a8fdf44ec1c26f47abd1f45a2dcf67bd161873ee05a59b8", size = 218521, upload-time = "2025-08-10T21:25:51.208Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/29/bc717b8902faaccf0ca486185f0dcab4778561a529dde51cb157acaafa16/coverage-7.10.3-cp311-cp311-win_amd64.whl", hash = "sha256:30c601610a9b23807c5e9e2e442054b795953ab85d525c3de1b1b27cebeb2117", size = 219412, upload-time = "2025-08-10T21:25:52.494Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/7a/5a1a7028c11bb589268c656c6b3f2bbf06e0aced31bbdf7a4e94e8442cc0/coverage-7.10.3-cp311-cp311-win_arm64.whl", hash = "sha256:dabe662312a97958e932dee056f2659051d822552c0b866823e8ba1c2fe64770", size = 218091, upload-time = "2025-08-10T21:25:54.102Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/62/13c0b66e966c43d7aa64dadc8cd2afa1f5a2bf9bb863bdabc21fb94e8b63/coverage-7.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:449c1e2d3a84d18bd204258a897a87bc57380072eb2aded6a5b5226046207b42", size = 216262, upload-time = "2025-08-10T21:25:55.367Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/f0/59fdf79be7ac2f0206fc739032f482cfd3f66b18f5248108ff192741beae/coverage-7.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1d4f9ce50b9261ad196dc2b2e9f1fbbee21651b54c3097a25ad783679fd18294", size = 216496, upload-time = "2025-08-10T21:25:56.759Z" },
+ { url = "https://files.pythonhosted.org/packages/34/b1/bc83788ba31bde6a0c02eb96bbc14b2d1eb083ee073beda18753fa2c4c66/coverage-7.10.3-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4dd4564207b160d0d45c36a10bc0a3d12563028e8b48cd6459ea322302a156d7", size = 247989, upload-time = "2025-08-10T21:25:58.067Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/29/f8bdf88357956c844bd872e87cb16748a37234f7f48c721dc7e981145eb7/coverage-7.10.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5ca3c9530ee072b7cb6a6ea7b640bcdff0ad3b334ae9687e521e59f79b1d0437", size = 250738, upload-time = "2025-08-10T21:25:59.406Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/df/6396301d332b71e42bbe624670af9376f63f73a455cc24723656afa95796/coverage-7.10.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b6df359e59fa243c9925ae6507e27f29c46698359f45e568fd51b9315dbbe587", size = 251868, upload-time = "2025-08-10T21:26:00.65Z" },
+ { url = "https://files.pythonhosted.org/packages/91/21/d760b2df6139b6ef62c9cc03afb9bcdf7d6e36ed4d078baacffa618b4c1c/coverage-7.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a181e4c2c896c2ff64c6312db3bda38e9ade2e1aa67f86a5628ae85873786cea", size = 249790, upload-time = "2025-08-10T21:26:02.009Z" },
+ { url = "https://files.pythonhosted.org/packages/69/91/5dcaa134568202397fa4023d7066d4318dc852b53b428052cd914faa05e1/coverage-7.10.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a374d4e923814e8b72b205ef6b3d3a647bb50e66f3558582eda074c976923613", size = 247907, upload-time = "2025-08-10T21:26:03.757Z" },
+ { url = "https://files.pythonhosted.org/packages/38/ed/70c0e871cdfef75f27faceada461206c1cc2510c151e1ef8d60a6fedda39/coverage-7.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:daeefff05993e5e8c6e7499a8508e7bd94502b6b9a9159c84fd1fe6bce3151cb", size = 249344, upload-time = "2025-08-10T21:26:05.11Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/55/c8a273ed503cedc07f8a00dcd843daf28e849f0972e4c6be4c027f418ad6/coverage-7.10.3-cp312-cp312-win32.whl", hash = "sha256:187ecdcac21f9636d570e419773df7bd2fda2e7fa040f812e7f95d0bddf5f79a", size = 218693, upload-time = "2025-08-10T21:26:06.534Z" },
+ { url = "https://files.pythonhosted.org/packages/94/58/dd3cfb2473b85be0b6eb8c5b6d80b6fc3f8f23611e69ef745cef8cf8bad5/coverage-7.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:4a50ad2524ee7e4c2a95e60d2b0b83283bdfc745fe82359d567e4f15d3823eb5", size = 219501, upload-time = "2025-08-10T21:26:08.195Z" },
+ { url = "https://files.pythonhosted.org/packages/56/af/7cbcbf23d46de6f24246e3f76b30df099d05636b30c53c158a196f7da3ad/coverage-7.10.3-cp312-cp312-win_arm64.whl", hash = "sha256:c112f04e075d3495fa3ed2200f71317da99608cbb2e9345bdb6de8819fc30571", size = 218135, upload-time = "2025-08-10T21:26:09.584Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/ff/239e4de9cc149c80e9cc359fab60592365b8c4cbfcad58b8a939d18c6898/coverage-7.10.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b99e87304ffe0eb97c5308447328a584258951853807afdc58b16143a530518a", size = 216298, upload-time = "2025-08-10T21:26:10.973Z" },
+ { url = "https://files.pythonhosted.org/packages/56/da/28717da68f8ba68f14b9f558aaa8f3e39ada8b9a1ae4f4977c8f98b286d5/coverage-7.10.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4af09c7574d09afbc1ea7da9dcea23665c01f3bc1b1feb061dac135f98ffc53a", size = 216546, upload-time = "2025-08-10T21:26:12.616Z" },
+ { url = "https://files.pythonhosted.org/packages/de/bb/e1ade16b9e3f2d6c323faeb6bee8e6c23f3a72760a5d9af102ef56a656cb/coverage-7.10.3-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:488e9b50dc5d2aa9521053cfa706209e5acf5289e81edc28291a24f4e4488f46", size = 247538, upload-time = "2025-08-10T21:26:14.455Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/2f/6ae1db51dc34db499bfe340e89f79a63bd115fc32513a7bacdf17d33cd86/coverage-7.10.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:913ceddb4289cbba3a310704a424e3fb7aac2bc0c3a23ea473193cb290cf17d4", size = 250141, upload-time = "2025-08-10T21:26:15.787Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/ed/33efd8819895b10c66348bf26f011dd621e804866c996ea6893d682218df/coverage-7.10.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b1f91cbc78c7112ab84ed2a8defbccd90f888fcae40a97ddd6466b0bec6ae8a", size = 251415, upload-time = "2025-08-10T21:26:17.535Z" },
+ { url = "https://files.pythonhosted.org/packages/26/04/cb83826f313d07dc743359c9914d9bc460e0798da9a0e38b4f4fabc207ed/coverage-7.10.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0bac054d45af7cd938834b43a9878b36ea92781bcb009eab040a5b09e9927e3", size = 249575, upload-time = "2025-08-10T21:26:18.921Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/fd/ae963c7a8e9581c20fa4355ab8940ca272554d8102e872dbb932a644e410/coverage-7.10.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:fe72cbdd12d9e0f4aca873fa6d755e103888a7f9085e4a62d282d9d5b9f7928c", size = 247466, upload-time = "2025-08-10T21:26:20.263Z" },
+ { url = "https://files.pythonhosted.org/packages/99/e8/b68d1487c6af370b8d5ef223c6d7e250d952c3acfbfcdbf1a773aa0da9d2/coverage-7.10.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c1e2e927ab3eadd7c244023927d646e4c15c65bb2ac7ae3c3e9537c013700d21", size = 249084, upload-time = "2025-08-10T21:26:21.638Z" },
+ { url = "https://files.pythonhosted.org/packages/66/4d/a0bcb561645c2c1e21758d8200443669d6560d2a2fb03955291110212ec4/coverage-7.10.3-cp313-cp313-win32.whl", hash = "sha256:24d0c13de473b04920ddd6e5da3c08831b1170b8f3b17461d7429b61cad59ae0", size = 218735, upload-time = "2025-08-10T21:26:23.009Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/c3/78b4adddbc0feb3b223f62761e5f9b4c5a758037aaf76e0a5845e9e35e48/coverage-7.10.3-cp313-cp313-win_amd64.whl", hash = "sha256:3564aae76bce4b96e2345cf53b4c87e938c4985424a9be6a66ee902626edec4c", size = 219531, upload-time = "2025-08-10T21:26:24.474Z" },
+ { url = "https://files.pythonhosted.org/packages/70/1b/1229c0b2a527fa5390db58d164aa896d513a1fbb85a1b6b6676846f00552/coverage-7.10.3-cp313-cp313-win_arm64.whl", hash = "sha256:f35580f19f297455f44afcd773c9c7a058e52eb6eb170aa31222e635f2e38b87", size = 218162, upload-time = "2025-08-10T21:26:25.847Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/26/1c1f450e15a3bf3eaecf053ff64538a2612a23f05b21d79ce03be9ff5903/coverage-7.10.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07009152f497a0464ffdf2634586787aea0e69ddd023eafb23fc38267db94b84", size = 217003, upload-time = "2025-08-10T21:26:27.231Z" },
+ { url = "https://files.pythonhosted.org/packages/29/96/4b40036181d8c2948454b458750960956a3c4785f26a3c29418bbbee1666/coverage-7.10.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dd2ba5f0c7e7e8cc418be2f0c14c4d9e3f08b8fb8e4c0f83c2fe87d03eb655e", size = 217238, upload-time = "2025-08-10T21:26:28.83Z" },
+ { url = "https://files.pythonhosted.org/packages/62/23/8dfc52e95da20957293fb94d97397a100e63095ec1e0ef5c09dd8c6f591a/coverage-7.10.3-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1ae22b97003c74186e034a93e4f946c75fad8c0ce8d92fbbc168b5e15ee2841f", size = 258561, upload-time = "2025-08-10T21:26:30.475Z" },
+ { url = "https://files.pythonhosted.org/packages/59/95/00e7fcbeda3f632232f4c07dde226afe3511a7781a000aa67798feadc535/coverage-7.10.3-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:eb329f1046888a36b1dc35504d3029e1dd5afe2196d94315d18c45ee380f67d5", size = 260735, upload-time = "2025-08-10T21:26:32.333Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/4c/f4666cbc4571804ba2a65b078ff0de600b0b577dc245389e0bc9b69ae7ca/coverage-7.10.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ce01048199a91f07f96ca3074b0c14021f4fe7ffd29a3e6a188ac60a5c3a4af8", size = 262960, upload-time = "2025-08-10T21:26:33.701Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/a5/8a9e8a7b12a290ed98b60f73d1d3e5e9ced75a4c94a0d1a671ce3ddfff2a/coverage-7.10.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:08b989a06eb9dfacf96d42b7fb4c9a22bafa370d245dc22fa839f2168c6f9fa1", size = 260515, upload-time = "2025-08-10T21:26:35.16Z" },
+ { url = "https://files.pythonhosted.org/packages/86/11/bb59f7f33b2cac0c5b17db0d9d0abba9c90d9eda51a6e727b43bd5fce4ae/coverage-7.10.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:669fe0d4e69c575c52148511029b722ba8d26e8a3129840c2ce0522e1452b256", size = 258278, upload-time = "2025-08-10T21:26:36.539Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/22/3646f8903743c07b3e53fded0700fed06c580a980482f04bf9536657ac17/coverage-7.10.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3262d19092771c83f3413831d9904b1ccc5f98da5de4ffa4ad67f5b20c7aaf7b", size = 259408, upload-time = "2025-08-10T21:26:37.954Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/5c/6375e9d905da22ddea41cd85c30994b8b6f6c02e44e4c5744b76d16b026f/coverage-7.10.3-cp313-cp313t-win32.whl", hash = "sha256:cc0ee4b2ccd42cab7ee6be46d8a67d230cb33a0a7cd47a58b587a7063b6c6b0e", size = 219396, upload-time = "2025-08-10T21:26:39.426Z" },
+ { url = "https://files.pythonhosted.org/packages/33/3b/7da37fd14412b8c8b6e73c3e7458fef6b1b05a37f990a9776f88e7740c89/coverage-7.10.3-cp313-cp313t-win_amd64.whl", hash = "sha256:03db599f213341e2960430984e04cf35fb179724e052a3ee627a068653cf4a7c", size = 220458, upload-time = "2025-08-10T21:26:40.905Z" },
+ { url = "https://files.pythonhosted.org/packages/28/cc/59a9a70f17edab513c844ee7a5c63cf1057041a84cc725b46a51c6f8301b/coverage-7.10.3-cp313-cp313t-win_arm64.whl", hash = "sha256:46eae7893ba65f53c71284585a262f083ef71594f05ec5c85baf79c402369098", size = 218722, upload-time = "2025-08-10T21:26:42.362Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/84/bb773b51a06edbf1231b47dc810a23851f2796e913b335a0fa364773b842/coverage-7.10.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:bce8b8180912914032785850d8f3aacb25ec1810f5f54afc4a8b114e7a9b55de", size = 216280, upload-time = "2025-08-10T21:26:44.132Z" },
+ { url = "https://files.pythonhosted.org/packages/92/a8/4d8ca9c111d09865f18d56facff64d5fa076a5593c290bd1cfc5dceb8dba/coverage-7.10.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:07790b4b37d56608536f7c1079bd1aa511567ac2966d33d5cec9cf520c50a7c8", size = 216557, upload-time = "2025-08-10T21:26:45.598Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/b2/eb668bfc5060194bc5e1ccd6f664e8e045881cfee66c42a2aa6e6c5b26e8/coverage-7.10.3-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e79367ef2cd9166acedcbf136a458dfe9a4a2dd4d1ee95738fb2ee581c56f667", size = 247598, upload-time = "2025-08-10T21:26:47.081Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/b0/9faa4ac62c8822219dd83e5d0e73876398af17d7305968aed8d1606d1830/coverage-7.10.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:419d2a0f769f26cb1d05e9ccbc5eab4cb5d70231604d47150867c07822acbdf4", size = 250131, upload-time = "2025-08-10T21:26:48.65Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/90/203537e310844d4bf1bdcfab89c1e05c25025c06d8489b9e6f937ad1a9e2/coverage-7.10.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ee221cf244757cdc2ac882e3062ab414b8464ad9c884c21e878517ea64b3fa26", size = 251485, upload-time = "2025-08-10T21:26:50.368Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/b2/9d894b26bc53c70a1fe503d62240ce6564256d6d35600bdb86b80e516e7d/coverage-7.10.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c2079d8cdd6f7373d628e14b3357f24d1db02c9dc22e6a007418ca7a2be0435a", size = 249488, upload-time = "2025-08-10T21:26:52.045Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/28/af167dbac5281ba6c55c933a0ca6675d68347d5aee39cacc14d44150b922/coverage-7.10.3-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:bd8df1f83c0703fa3ca781b02d36f9ec67ad9cb725b18d486405924f5e4270bd", size = 247419, upload-time = "2025-08-10T21:26:53.533Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/1c/9a4ddc9f0dcb150d4cd619e1c4bb39bcf694c6129220bdd1e5895d694dda/coverage-7.10.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6b4e25e0fa335c8aa26e42a52053f3786a61cc7622b4d54ae2dad994aa754fec", size = 248917, upload-time = "2025-08-10T21:26:55.11Z" },
+ { url = "https://files.pythonhosted.org/packages/92/27/c6a60c7cbe10dbcdcd7fc9ee89d531dc04ea4c073800279bb269954c5a9f/coverage-7.10.3-cp314-cp314-win32.whl", hash = "sha256:d7c3d02c2866deb217dce664c71787f4b25420ea3eaf87056f44fb364a3528f5", size = 218999, upload-time = "2025-08-10T21:26:56.637Z" },
+ { url = "https://files.pythonhosted.org/packages/36/09/a94c1369964ab31273576615d55e7d14619a1c47a662ed3e2a2fe4dee7d4/coverage-7.10.3-cp314-cp314-win_amd64.whl", hash = "sha256:9c8916d44d9e0fe6cdb2227dc6b0edd8bc6c8ef13438bbbf69af7482d9bb9833", size = 219801, upload-time = "2025-08-10T21:26:58.207Z" },
+ { url = "https://files.pythonhosted.org/packages/23/59/f5cd2a80f401c01cf0f3add64a7b791b7d53fd6090a4e3e9ea52691cf3c4/coverage-7.10.3-cp314-cp314-win_arm64.whl", hash = "sha256:1007d6a2b3cf197c57105cc1ba390d9ff7f0bee215ced4dea530181e49c65ab4", size = 218381, upload-time = "2025-08-10T21:26:59.707Z" },
+ { url = "https://files.pythonhosted.org/packages/73/3d/89d65baf1ea39e148ee989de6da601469ba93c1d905b17dfb0b83bd39c96/coverage-7.10.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:ebc8791d346410d096818788877d675ca55c91db87d60e8f477bd41c6970ffc6", size = 217019, upload-time = "2025-08-10T21:27:01.242Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/7d/d9850230cd9c999ce3a1e600f85c2fff61a81c301334d7a1faa1a5ba19c8/coverage-7.10.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f4e4d8e75f6fd3c6940ebeed29e3d9d632e1f18f6fb65d33086d99d4d073241", size = 217237, upload-time = "2025-08-10T21:27:03.442Z" },
+ { url = "https://files.pythonhosted.org/packages/36/51/b87002d417202ab27f4a1cd6bd34ee3b78f51b3ddbef51639099661da991/coverage-7.10.3-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:24581ed69f132b6225a31b0228ae4885731cddc966f8a33fe5987288bdbbbd5e", size = 258735, upload-time = "2025-08-10T21:27:05.124Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/02/1f8612bfcb46fc7ca64a353fff1cd4ed932bb6e0b4e0bb88b699c16794b8/coverage-7.10.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ec151569ddfccbf71bac8c422dce15e176167385a00cd86e887f9a80035ce8a5", size = 260901, upload-time = "2025-08-10T21:27:06.68Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/3a/fe39e624ddcb2373908bd922756384bb70ac1c5009b0d1674eb326a3e428/coverage-7.10.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2ae8e7c56290b908ee817200c0b65929b8050bc28530b131fe7c6dfee3e7d86b", size = 263157, upload-time = "2025-08-10T21:27:08.398Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/89/496b6d5a10fa0d0691a633bb2b2bcf4f38f0bdfcbde21ad9e32d1af328ed/coverage-7.10.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fb742309766d7e48e9eb4dc34bc95a424707bc6140c0e7d9726e794f11b92a0", size = 260597, upload-time = "2025-08-10T21:27:10.237Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/a6/8b5bf6a9e8c6aaeb47d5fe9687014148efc05c3588110246d5fdeef9b492/coverage-7.10.3-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:c65e2a5b32fbe1e499f1036efa6eb9cb4ea2bf6f7168d0e7a5852f3024f471b1", size = 258353, upload-time = "2025-08-10T21:27:11.773Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/6d/ad131be74f8afd28150a07565dfbdc86592fd61d97e2dc83383d9af219f0/coverage-7.10.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d48d2cb07d50f12f4f18d2bb75d9d19e3506c26d96fffabf56d22936e5ed8f7c", size = 259504, upload-time = "2025-08-10T21:27:13.254Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/30/fc9b5097092758cba3375a8cc4ff61774f8cd733bcfb6c9d21a60077a8d8/coverage-7.10.3-cp314-cp314t-win32.whl", hash = "sha256:dec0d9bc15ee305e09fe2cd1911d3f0371262d3cfdae05d79515d8cb712b4869", size = 219782, upload-time = "2025-08-10T21:27:14.736Z" },
+ { url = "https://files.pythonhosted.org/packages/72/9b/27fbf79451b1fac15c4bda6ec6e9deae27cf7c0648c1305aa21a3454f5c4/coverage-7.10.3-cp314-cp314t-win_amd64.whl", hash = "sha256:424ea93a323aa0f7f01174308ea78bde885c3089ec1bef7143a6d93c3e24ef64", size = 220898, upload-time = "2025-08-10T21:27:16.297Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/cf/a32bbf92869cbf0b7c8b84325327bfc718ad4b6d2c63374fef3d58e39306/coverage-7.10.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f5983c132a62d93d71c9ef896a0b9bf6e6828d8d2ea32611f58684fba60bba35", size = 218922, upload-time = "2025-08-10T21:27:18.22Z" },
+ { url = "https://files.pythonhosted.org/packages/84/19/e67f4ae24e232c7f713337f3f4f7c9c58afd0c02866fb07c7b9255a19ed7/coverage-7.10.3-py3-none-any.whl", hash = "sha256:416a8d74dc0adfd33944ba2f405897bab87b7e9e84a391e09d241956bd953ce1", size = 207921, upload-time = "2025-08-10T21:27:38.254Z" },
+]
+
+[package.optional-dependencies]
+toml = [
+ { name = "tomli", marker = "python_full_version <= '3.11'" },
+]
+
+[[package]]
+name = "cryptography"
+version = "45.0.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d6/0d/d13399c94234ee8f3df384819dc67e0c5ce215fb751d567a55a1f4b028c7/cryptography-45.0.6.tar.gz", hash = "sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719", size = 744949, upload-time = "2025-08-05T23:59:27.93Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8c/29/2793d178d0eda1ca4a09a7c4e09a5185e75738cc6d526433e8663b460ea6/cryptography-45.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74", size = 7042702, upload-time = "2025-08-05T23:58:23.464Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/b6/cabd07410f222f32c8d55486c464f432808abaa1f12af9afcbe8f2f19030/cryptography-45.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f", size = 4206483, upload-time = "2025-08-05T23:58:27.132Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/9e/f9c7d36a38b1cfeb1cc74849aabe9bf817990f7603ff6eb485e0d70e0b27/cryptography-45.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf", size = 4429679, upload-time = "2025-08-05T23:58:29.152Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/2a/4434c17eb32ef30b254b9e8b9830cee4e516f08b47fdd291c5b1255b8101/cryptography-45.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5", size = 4210553, upload-time = "2025-08-05T23:58:30.596Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/1d/09a5df8e0c4b7970f5d1f3aff1b640df6d4be28a64cae970d56c6cf1c772/cryptography-45.0.6-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2", size = 3894499, upload-time = "2025-08-05T23:58:32.03Z" },
+ { url = "https://files.pythonhosted.org/packages/79/62/120842ab20d9150a9d3a6bdc07fe2870384e82f5266d41c53b08a3a96b34/cryptography-45.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08", size = 4458484, upload-time = "2025-08-05T23:58:33.526Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/80/1bc3634d45ddfed0871bfba52cf8f1ad724761662a0c792b97a951fb1b30/cryptography-45.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402", size = 4210281, upload-time = "2025-08-05T23:58:35.445Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/fe/ffb12c2d83d0ee625f124880a1f023b5878f79da92e64c37962bbbe35f3f/cryptography-45.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42", size = 4456890, upload-time = "2025-08-05T23:58:36.923Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/8e/b3f3fe0dc82c77a0deb5f493b23311e09193f2268b77196ec0f7a36e3f3e/cryptography-45.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05", size = 4333247, upload-time = "2025-08-05T23:58:38.781Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/a6/c3ef2ab9e334da27a1d7b56af4a2417d77e7806b2e0f90d6267ce120d2e4/cryptography-45.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453", size = 4565045, upload-time = "2025-08-05T23:58:40.415Z" },
+ { url = "https://files.pythonhosted.org/packages/31/c3/77722446b13fa71dddd820a5faab4ce6db49e7e0bf8312ef4192a3f78e2f/cryptography-45.0.6-cp311-abi3-win32.whl", hash = "sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159", size = 2928923, upload-time = "2025-08-05T23:58:41.919Z" },
+ { url = "https://files.pythonhosted.org/packages/38/63/a025c3225188a811b82932a4dcc8457a26c3729d81578ccecbcce2cb784e/cryptography-45.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec", size = 3403805, upload-time = "2025-08-05T23:58:43.792Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/af/bcfbea93a30809f126d51c074ee0fac5bd9d57d068edf56c2a73abedbea4/cryptography-45.0.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0", size = 7020111, upload-time = "2025-08-05T23:58:45.316Z" },
+ { url = "https://files.pythonhosted.org/packages/98/c6/ea5173689e014f1a8470899cd5beeb358e22bb3cf5a876060f9d1ca78af4/cryptography-45.0.6-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394", size = 4198169, upload-time = "2025-08-05T23:58:47.121Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/73/b12995edc0c7e2311ffb57ebd3b351f6b268fed37d93bfc6f9856e01c473/cryptography-45.0.6-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9", size = 4421273, upload-time = "2025-08-05T23:58:48.557Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/6e/286894f6f71926bc0da67408c853dd9ba953f662dcb70993a59fd499f111/cryptography-45.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3", size = 4199211, upload-time = "2025-08-05T23:58:50.139Z" },
+ { url = "https://files.pythonhosted.org/packages/de/34/a7f55e39b9623c5cb571d77a6a90387fe557908ffc44f6872f26ca8ae270/cryptography-45.0.6-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3", size = 3883732, upload-time = "2025-08-05T23:58:52.253Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/b9/c6d32edbcba0cd9f5df90f29ed46a65c4631c4fbe11187feb9169c6ff506/cryptography-45.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301", size = 4450655, upload-time = "2025-08-05T23:58:53.848Z" },
+ { url = "https://files.pythonhosted.org/packages/77/2d/09b097adfdee0227cfd4c699b3375a842080f065bab9014248933497c3f9/cryptography-45.0.6-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5", size = 4198956, upload-time = "2025-08-05T23:58:55.209Z" },
+ { url = "https://files.pythonhosted.org/packages/55/66/061ec6689207d54effdff535bbdf85cc380d32dd5377173085812565cf38/cryptography-45.0.6-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016", size = 4449859, upload-time = "2025-08-05T23:58:56.639Z" },
+ { url = "https://files.pythonhosted.org/packages/41/ff/e7d5a2ad2d035e5a2af116e1a3adb4d8fcd0be92a18032917a089c6e5028/cryptography-45.0.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3", size = 4320254, upload-time = "2025-08-05T23:58:58.833Z" },
+ { url = "https://files.pythonhosted.org/packages/82/27/092d311af22095d288f4db89fcaebadfb2f28944f3d790a4cf51fe5ddaeb/cryptography-45.0.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9", size = 4554815, upload-time = "2025-08-05T23:59:00.283Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/01/aa2f4940262d588a8fdf4edabe4cda45854d00ebc6eaac12568b3a491a16/cryptography-45.0.6-cp37-abi3-win32.whl", hash = "sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02", size = 2912147, upload-time = "2025-08-05T23:59:01.716Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/bc/16e0276078c2de3ceef6b5a34b965f4436215efac45313df90d55f0ba2d2/cryptography-45.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b", size = 3390459, upload-time = "2025-08-05T23:59:03.358Z" },
+ { url = "https://files.pythonhosted.org/packages/56/d2/4482d97c948c029be08cb29854a91bd2ae8da7eb9c4152461f1244dcea70/cryptography-45.0.6-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:705bb7c7ecc3d79a50f236adda12ca331c8e7ecfbea51edd931ce5a7a7c4f012", size = 3576812, upload-time = "2025-08-05T23:59:04.833Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/24/55fc238fcaa122855442604b8badb2d442367dfbd5a7ca4bb0bd346e263a/cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:826b46dae41a1155a0c0e66fafba43d0ede1dc16570b95e40c4d83bfcf0a451d", size = 4141694, upload-time = "2025-08-05T23:59:06.66Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/7e/3ea4fa6fbe51baf3903806a0241c666b04c73d2358a3ecce09ebee8b9622/cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:cc4d66f5dc4dc37b89cfef1bd5044387f7a1f6f0abb490815628501909332d5d", size = 4375010, upload-time = "2025-08-05T23:59:08.14Z" },
+ { url = "https://files.pythonhosted.org/packages/50/42/ec5a892d82d2a2c29f80fc19ced4ba669bca29f032faf6989609cff1f8dc/cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:f68f833a9d445cc49f01097d95c83a850795921b3f7cc6488731e69bde3288da", size = 4141377, upload-time = "2025-08-05T23:59:09.584Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/d7/246c4c973a22b9c2931999da953a2c19cae7c66b9154c2d62ffed811225e/cryptography-45.0.6-pp310-pypy310_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:3b5bf5267e98661b9b888a9250d05b063220dfa917a8203744454573c7eb79db", size = 4374609, upload-time = "2025-08-05T23:59:11.923Z" },
+ { url = "https://files.pythonhosted.org/packages/78/6d/c49ccf243f0a1b0781c2a8de8123ee552f0c8a417c6367a24d2ecb7c11b3/cryptography-45.0.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2384f2ab18d9be88a6e4f8972923405e2dbb8d3e16c6b43f15ca491d7831bd18", size = 3322156, upload-time = "2025-08-05T23:59:13.597Z" },
+ { url = "https://files.pythonhosted.org/packages/61/69/c252de4ec047ba2f567ecb53149410219577d408c2aea9c989acae7eafce/cryptography-45.0.6-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fc022c1fa5acff6def2fc6d7819bbbd31ccddfe67d075331a65d9cfb28a20983", size = 3584669, upload-time = "2025-08-05T23:59:15.431Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/fe/deea71e9f310a31fe0a6bfee670955152128d309ea2d1c79e2a5ae0f0401/cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:3de77e4df42ac8d4e4d6cdb342d989803ad37707cf8f3fbf7b088c9cbdd46427", size = 4153022, upload-time = "2025-08-05T23:59:16.954Z" },
+ { url = "https://files.pythonhosted.org/packages/60/45/a77452f5e49cb580feedba6606d66ae7b82c128947aa754533b3d1bd44b0/cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:599c8d7df950aa68baa7e98f7b73f4f414c9f02d0e8104a30c0182a07732638b", size = 4386802, upload-time = "2025-08-05T23:59:18.55Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/b9/a2f747d2acd5e3075fdf5c145c7c3568895daaa38b3b0c960ef830db6cdc/cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:31a2b9a10530a1cb04ffd6aa1cd4d3be9ed49f7d77a4dafe198f3b382f41545c", size = 4152706, upload-time = "2025-08-05T23:59:20.044Z" },
+ { url = "https://files.pythonhosted.org/packages/81/ec/381b3e8d0685a3f3f304a382aa3dfce36af2d76467da0fd4bb21ddccc7b2/cryptography-45.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:e5b3dda1b00fb41da3af4c5ef3f922a200e33ee5ba0f0bc9ecf0b0c173958385", size = 4386740, upload-time = "2025-08-05T23:59:21.525Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/76/cf8d69da8d0b5ecb0db406f24a63a3f69ba5e791a11b782aeeefef27ccbb/cryptography-45.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:629127cfdcdc6806dfe234734d7cb8ac54edaf572148274fa377a7d3405b0043", size = 3331874, upload-time = "2025-08-05T23:59:23.017Z" },
+]
+
+[[package]]
+name = "distlib"
+version = "0.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/96/8e/709914eb2b5749865801041647dc7f4e6d00b549cfe88b65ca192995f07c/distlib-0.4.0.tar.gz", hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d", size = 614605, upload-time = "2025-07-17T16:52:00.465Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/33/6b/e0547afaf41bf2c42e52430072fa5658766e3d65bd4b03a563d1b6336f57/distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16", size = 469047, upload-time = "2025-07-17T16:51:58.613Z" },
+]
+
+[[package]]
+name = "dllist"
+version = "2.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/ce/dda13123329d55555de67b414206f14f4f80b96cb273ec59d76d5ce6b5c3/dllist-2.0.0.tar.gz", hash = "sha256:7413ba963aaa1b2b6827eadd7908e40e635b19108ab431667485eaf75c492bf4", size = 5369, upload-time = "2025-02-10T15:41:07.637Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7b/07/a212ca4cfe56fa35c8315307e37df218e5946c726a792b5c7a795b245c10/dllist-2.0.0-py3-none-any.whl", hash = "sha256:cd307b1a91bc46fae084f8c817d79be7e34951b149a2fd69004772e03573bfb3", size = 5721, upload-time = "2025-02-10T15:41:06.045Z" },
+]
+
+[[package]]
+name = "dparse"
+version = "0.6.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "packaging" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/29/ee/96c65e17222b973f0d3d0aa9bad6a59104ca1b0eb5b659c25c2900fccd85/dparse-0.6.4.tar.gz", hash = "sha256:90b29c39e3edc36c6284c82c4132648eaf28a01863eb3c231c2512196132201a", size = 27912, upload-time = "2024-11-08T16:52:06.444Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/56/26/035d1c308882514a1e6ddca27f9d3e570d67a0e293e7b4d910a70c8fe32b/dparse-0.6.4-py3-none-any.whl", hash = "sha256:fbab4d50d54d0e739fbb4dedfc3d92771003a5b9aa8545ca7a7045e3b174af57", size = 11925, upload-time = "2024-11-08T16:52:03.844Z" },
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/0b/9f/a65090624ecf468cdca03533906e7c69ed7588582240cfe7cc9e770b50eb/exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88", size = 29749, upload-time = "2025-05-10T17:42:51.123Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/36/f4/c6e662dade71f56cd2f3735141b265c3c79293c109549c1e6933b0651ffc/exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10", size = 16674, upload-time = "2025-05-10T17:42:49.33Z" },
+]
+
+[[package]]
+name = "execnet"
+version = "2.1.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524, upload-time = "2024-04-08T09:04:19.245Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" },
+]
+
+[[package]]
+name = "fastapi"
+version = "0.116.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pydantic" },
+ { name = "starlette" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" },
+]
+
+[[package]]
+name = "filelock"
+version = "3.12.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d5/71/bb1326535231229dd69a9dd2e338f6f54b2d57bd88fc4a52285c0ab8a5f6/filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd", size = 13758, upload-time = "2023-09-13T16:01:03.605Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5e/5d/97afbafd9d584ff1b45fcb354a479a3609bd97f912f8f1f6c563cb1fae21/filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4", size = 11221, upload-time = "2023-09-13T16:01:02.163Z" },
+]
+
+[[package]]
+name = "flatbuffers"
+version = "25.2.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e4/30/eb5dce7994fc71a2f685d98ec33cc660c0a5887db5610137e60d8cbc4489/flatbuffers-25.2.10.tar.gz", hash = "sha256:97e451377a41262f8d9bd4295cc836133415cc03d8cb966410a4af92eb00d26e", size = 22170, upload-time = "2025-02-11T04:26:46.257Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b8/25/155f9f080d5e4bc0082edfda032ea2bc2b8fab3f4d25d46c1e9dd22a1a89/flatbuffers-25.2.10-py2.py3-none-any.whl", hash = "sha256:ebba5f4d5ea615af3f7fd70fc310636fbb2bbd1f566ac0a23d98dd412de50051", size = 30953, upload-time = "2025-02-11T04:26:44.484Z" },
+]
+
+[[package]]
+name = "frozenlist"
+version = "1.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/af/36/0da0a49409f6b47cc2d060dc8c9040b897b5902a8a4e37d9bc1deb11f680/frozenlist-1.7.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc4df77d638aa2ed703b878dd093725b72a824c3c546c076e8fdf276f78ee84a", size = 81304, upload-time = "2025-06-09T22:59:46.226Z" },
+ { url = "https://files.pythonhosted.org/packages/77/f0/77c11d13d39513b298e267b22eb6cb559c103d56f155aa9a49097221f0b6/frozenlist-1.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:716a9973a2cc963160394f701964fe25012600f3d311f60c790400b00e568b61", size = 47735, upload-time = "2025-06-09T22:59:48.133Z" },
+ { url = "https://files.pythonhosted.org/packages/37/12/9d07fa18971a44150593de56b2f2947c46604819976784bcf6ea0d5db43b/frozenlist-1.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0fd1bad056a3600047fb9462cff4c5322cebc59ebf5d0a3725e0ee78955001d", size = 46775, upload-time = "2025-06-09T22:59:49.564Z" },
+ { url = "https://files.pythonhosted.org/packages/70/34/f73539227e06288fcd1f8a76853e755b2b48bca6747e99e283111c18bcd4/frozenlist-1.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3789ebc19cb811163e70fe2bd354cea097254ce6e707ae42e56f45e31e96cb8e", size = 224644, upload-time = "2025-06-09T22:59:51.35Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/68/c1d9c2f4a6e438e14613bad0f2973567586610cc22dcb1e1241da71de9d3/frozenlist-1.7.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:af369aa35ee34f132fcfad5be45fbfcde0e3a5f6a1ec0712857f286b7d20cca9", size = 222125, upload-time = "2025-06-09T22:59:52.884Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/d0/98e8f9a515228d708344d7c6986752be3e3192d1795f748c24bcf154ad99/frozenlist-1.7.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac64b6478722eeb7a3313d494f8342ef3478dff539d17002f849101b212ef97c", size = 233455, upload-time = "2025-06-09T22:59:54.74Z" },
+ { url = "https://files.pythonhosted.org/packages/79/df/8a11bcec5600557f40338407d3e5bea80376ed1c01a6c0910fcfdc4b8993/frozenlist-1.7.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f89f65d85774f1797239693cef07ad4c97fdd0639544bad9ac4b869782eb1981", size = 227339, upload-time = "2025-06-09T22:59:56.187Z" },
+ { url = "https://files.pythonhosted.org/packages/50/82/41cb97d9c9a5ff94438c63cc343eb7980dac4187eb625a51bdfdb7707314/frozenlist-1.7.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1073557c941395fdfcfac13eb2456cb8aad89f9de27bae29fabca8e563b12615", size = 212969, upload-time = "2025-06-09T22:59:57.604Z" },
+ { url = "https://files.pythonhosted.org/packages/13/47/f9179ee5ee4f55629e4f28c660b3fdf2775c8bfde8f9c53f2de2d93f52a9/frozenlist-1.7.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ed8d2fa095aae4bdc7fdd80351009a48d286635edffee66bf865e37a9125c50", size = 222862, upload-time = "2025-06-09T22:59:59.498Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/52/df81e41ec6b953902c8b7e3a83bee48b195cb0e5ec2eabae5d8330c78038/frozenlist-1.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:24c34bea555fe42d9f928ba0a740c553088500377448febecaa82cc3e88aa1fa", size = 222492, upload-time = "2025-06-09T23:00:01.026Z" },
+ { url = "https://files.pythonhosted.org/packages/84/17/30d6ea87fa95a9408245a948604b82c1a4b8b3e153cea596421a2aef2754/frozenlist-1.7.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:69cac419ac6a6baad202c85aaf467b65ac860ac2e7f2ac1686dc40dbb52f6577", size = 238250, upload-time = "2025-06-09T23:00:03.401Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/00/ecbeb51669e3c3df76cf2ddd66ae3e48345ec213a55e3887d216eb4fbab3/frozenlist-1.7.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:960d67d0611f4c87da7e2ae2eacf7ea81a5be967861e0c63cf205215afbfac59", size = 218720, upload-time = "2025-06-09T23:00:05.282Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/c0/c224ce0e0eb31cc57f67742071bb470ba8246623c1823a7530be0e76164c/frozenlist-1.7.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:41be2964bd4b15bf575e5daee5a5ce7ed3115320fb3c2b71fca05582ffa4dc9e", size = 232585, upload-time = "2025-06-09T23:00:07.962Z" },
+ { url = "https://files.pythonhosted.org/packages/55/3c/34cb694abf532f31f365106deebdeac9e45c19304d83cf7d51ebbb4ca4d1/frozenlist-1.7.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:46d84d49e00c9429238a7ce02dc0be8f6d7cd0cd405abd1bebdc991bf27c15bd", size = 234248, upload-time = "2025-06-09T23:00:09.428Z" },
+ { url = "https://files.pythonhosted.org/packages/98/c0/2052d8b6cecda2e70bd81299e3512fa332abb6dcd2969b9c80dfcdddbf75/frozenlist-1.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:15900082e886edb37480335d9d518cec978afc69ccbc30bd18610b7c1b22a718", size = 221621, upload-time = "2025-06-09T23:00:11.32Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/bf/7dcebae315436903b1d98ffb791a09d674c88480c158aa171958a3ac07f0/frozenlist-1.7.0-cp310-cp310-win32.whl", hash = "sha256:400ddd24ab4e55014bba442d917203c73b2846391dd42ca5e38ff52bb18c3c5e", size = 39578, upload-time = "2025-06-09T23:00:13.526Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/5f/f69818f017fa9a3d24d1ae39763e29b7f60a59e46d5f91b9c6b21622f4cd/frozenlist-1.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:6eb93efb8101ef39d32d50bce242c84bcbddb4f7e9febfa7b524532a239b4464", size = 43830, upload-time = "2025-06-09T23:00:14.98Z" },
+ { url = "https://files.pythonhosted.org/packages/34/7e/803dde33760128acd393a27eb002f2020ddb8d99d30a44bfbaab31c5f08a/frozenlist-1.7.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:aa51e147a66b2d74de1e6e2cf5921890de6b0f4820b257465101d7f37b49fb5a", size = 82251, upload-time = "2025-06-09T23:00:16.279Z" },
+ { url = "https://files.pythonhosted.org/packages/75/a9/9c2c5760b6ba45eae11334db454c189d43d34a4c0b489feb2175e5e64277/frozenlist-1.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9b35db7ce1cd71d36ba24f80f0c9e7cff73a28d7a74e91fe83e23d27c7828750", size = 48183, upload-time = "2025-06-09T23:00:17.698Z" },
+ { url = "https://files.pythonhosted.org/packages/47/be/4038e2d869f8a2da165f35a6befb9158c259819be22eeaf9c9a8f6a87771/frozenlist-1.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34a69a85e34ff37791e94542065c8416c1afbf820b68f720452f636d5fb990cd", size = 47107, upload-time = "2025-06-09T23:00:18.952Z" },
+ { url = "https://files.pythonhosted.org/packages/79/26/85314b8a83187c76a37183ceed886381a5f992975786f883472fcb6dc5f2/frozenlist-1.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a646531fa8d82c87fe4bb2e596f23173caec9185bfbca5d583b4ccfb95183e2", size = 237333, upload-time = "2025-06-09T23:00:20.275Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/fd/e5b64f7d2c92a41639ffb2ad44a6a82f347787abc0c7df5f49057cf11770/frozenlist-1.7.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:79b2ffbba483f4ed36a0f236ccb85fbb16e670c9238313709638167670ba235f", size = 231724, upload-time = "2025-06-09T23:00:21.705Z" },
+ { url = "https://files.pythonhosted.org/packages/20/fb/03395c0a43a5976af4bf7534759d214405fbbb4c114683f434dfdd3128ef/frozenlist-1.7.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a26f205c9ca5829cbf82bb2a84b5c36f7184c4316617d7ef1b271a56720d6b30", size = 245842, upload-time = "2025-06-09T23:00:23.148Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/15/c01c8e1dffdac5d9803507d824f27aed2ba76b6ed0026fab4d9866e82f1f/frozenlist-1.7.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bcacfad3185a623fa11ea0e0634aac7b691aa925d50a440f39b458e41c561d98", size = 239767, upload-time = "2025-06-09T23:00:25.103Z" },
+ { url = "https://files.pythonhosted.org/packages/14/99/3f4c6fe882c1f5514b6848aa0a69b20cb5e5d8e8f51a339d48c0e9305ed0/frozenlist-1.7.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72c1b0fe8fe451b34f12dce46445ddf14bd2a5bcad7e324987194dc8e3a74c86", size = 224130, upload-time = "2025-06-09T23:00:27.061Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/83/220a374bd7b2aeba9d0725130665afe11de347d95c3620b9b82cc2fcab97/frozenlist-1.7.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61d1a5baeaac6c0798ff6edfaeaa00e0e412d49946c53fae8d4b8e8b3566c4ae", size = 235301, upload-time = "2025-06-09T23:00:29.02Z" },
+ { url = "https://files.pythonhosted.org/packages/03/3c/3e3390d75334a063181625343e8daab61b77e1b8214802cc4e8a1bb678fc/frozenlist-1.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7edf5c043c062462f09b6820de9854bf28cc6cc5b6714b383149745e287181a8", size = 234606, upload-time = "2025-06-09T23:00:30.514Z" },
+ { url = "https://files.pythonhosted.org/packages/23/1e/58232c19608b7a549d72d9903005e2d82488f12554a32de2d5fb59b9b1ba/frozenlist-1.7.0-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:d50ac7627b3a1bd2dcef6f9da89a772694ec04d9a61b66cf87f7d9446b4a0c31", size = 248372, upload-time = "2025-06-09T23:00:31.966Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/a4/e4a567e01702a88a74ce8a324691e62a629bf47d4f8607f24bf1c7216e7f/frozenlist-1.7.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:ce48b2fece5aeb45265bb7a58259f45027db0abff478e3077e12b05b17fb9da7", size = 229860, upload-time = "2025-06-09T23:00:33.375Z" },
+ { url = "https://files.pythonhosted.org/packages/73/a6/63b3374f7d22268b41a9db73d68a8233afa30ed164c46107b33c4d18ecdd/frozenlist-1.7.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:fe2365ae915a1fafd982c146754e1de6ab3478def8a59c86e1f7242d794f97d5", size = 245893, upload-time = "2025-06-09T23:00:35.002Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/eb/d18b3f6e64799a79673c4ba0b45e4cfbe49c240edfd03a68be20002eaeaa/frozenlist-1.7.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:45a6f2fdbd10e074e8814eb98b05292f27bad7d1883afbe009d96abdcf3bc898", size = 246323, upload-time = "2025-06-09T23:00:36.468Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/f5/720f3812e3d06cd89a1d5db9ff6450088b8f5c449dae8ffb2971a44da506/frozenlist-1.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:21884e23cffabb157a9dd7e353779077bf5b8f9a58e9b262c6caad2ef5f80a56", size = 233149, upload-time = "2025-06-09T23:00:37.963Z" },
+ { url = "https://files.pythonhosted.org/packages/69/68/03efbf545e217d5db8446acfd4c447c15b7c8cf4dbd4a58403111df9322d/frozenlist-1.7.0-cp311-cp311-win32.whl", hash = "sha256:284d233a8953d7b24f9159b8a3496fc1ddc00f4db99c324bd5fb5f22d8698ea7", size = 39565, upload-time = "2025-06-09T23:00:39.753Z" },
+ { url = "https://files.pythonhosted.org/packages/58/17/fe61124c5c333ae87f09bb67186d65038834a47d974fc10a5fadb4cc5ae1/frozenlist-1.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:387cbfdcde2f2353f19c2f66bbb52406d06ed77519ac7ee21be0232147c2592d", size = 44019, upload-time = "2025-06-09T23:00:40.988Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" },
+ { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = "2025-06-09T23:00:44.793Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" },
+ { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" },
+ { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" },
+ { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" },
+ { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" },
+ { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" },
+ { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" },
+ { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" },
+ { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" },
+ { url = "https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" },
+ { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" },
+ { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" },
+ { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" },
+ { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" },
+ { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" },
+ { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" },
+ { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" },
+ { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" },
+ { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" },
+ { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" },
+ { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" },
+]
+
+[[package]]
+name = "fsspec"
+version = "2025.7.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/8b/02/0835e6ab9cfc03916fe3f78c0956cfcdb6ff2669ffa6651065d5ebf7fc98/fsspec-2025.7.0.tar.gz", hash = "sha256:786120687ffa54b8283d942929540d8bc5ccfa820deb555a2b5d0ed2b737bf58", size = 304432, upload-time = "2025-07-15T16:05:21.19Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2f/e0/014d5d9d7a4564cf1c40b5039bc882db69fd881111e03ab3657ac0b218e2/fsspec-2025.7.0-py3-none-any.whl", hash = "sha256:8b012e39f63c7d5f10474de957f3ab793b47b45ae7d39f2fb735f8bbe25c0e21", size = 199597, upload-time = "2025-07-15T16:05:19.529Z" },
+]
+
+[[package]]
+name = "ghp-import"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "python-dateutil" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d9/29/d40217cbe2f6b1359e00c6c307bb3fc876ba74068cbab3dde77f03ca0dc4/ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343", size = 10943, upload-time = "2022-05-02T15:47:16.11Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f7/ec/67fbef5d497f86283db54c22eec6f6140243aae73265799baaaa19cd17fb/ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619", size = 11034, upload-time = "2022-05-02T15:47:14.552Z" },
+]
+
+[[package]]
+name = "griffe"
+version = "1.11.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/18/0f/9cbd56eb047de77a4b93d8d4674e70cd19a1ff64d7410651b514a1ed93d5/griffe-1.11.1.tar.gz", hash = "sha256:d54ffad1ec4da9658901eb5521e9cddcdb7a496604f67d8ae71077f03f549b7e", size = 410996, upload-time = "2025-08-11T11:38:35.528Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e6/a3/451ffd422ce143758a39c0290aaa7c9727ecc2bcc19debd7a8f3c6075ce9/griffe-1.11.1-py3-none-any.whl", hash = "sha256:5799cf7c513e4b928cfc6107ee6c4bc4a92e001f07022d97fd8dee2f612b6064", size = 138745, upload-time = "2025-08-11T11:38:33.964Z" },
+]
+
+[[package]]
+name = "h11"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
+]
+
+[[package]]
+name = "hf-xet"
+version = "1.1.7"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/0a/a0f56735940fde6dd627602fec9ab3bad23f66a272397560abd65aba416e/hf_xet-1.1.7.tar.gz", hash = "sha256:20cec8db4561338824a3b5f8c19774055b04a8df7fff0cb1ff2cb1a0c1607b80", size = 477719, upload-time = "2025-08-06T00:30:55.741Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b1/7c/8d7803995caf14e7d19a392a486a040f923e2cfeff824e9b800b92072f76/hf_xet-1.1.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:60dae4b44d520819e54e216a2505685248ec0adbdb2dd4848b17aa85a0375cde", size = 2761743, upload-time = "2025-08-06T00:30:50.634Z" },
+ { url = "https://files.pythonhosted.org/packages/51/a3/fa5897099454aa287022a34a30e68dbff0e617760f774f8bd1db17f06bd4/hf_xet-1.1.7-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:b109f4c11e01c057fc82004c9e51e6cdfe2cb230637644ade40c599739067b2e", size = 2624331, upload-time = "2025-08-06T00:30:49.212Z" },
+ { url = "https://files.pythonhosted.org/packages/86/50/2446a132267e60b8a48b2e5835d6e24fd988000d0f5b9b15ebd6d64ef769/hf_xet-1.1.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6efaaf1a5a9fc3a501d3e71e88a6bfebc69ee3a716d0e713a931c8b8d920038f", size = 3183844, upload-time = "2025-08-06T00:30:47.582Z" },
+ { url = "https://files.pythonhosted.org/packages/20/8f/ccc670616bb9beee867c6bb7139f7eab2b1370fe426503c25f5cbb27b148/hf_xet-1.1.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:751571540f9c1fbad9afcf222a5fb96daf2384bf821317b8bfb0c59d86078513", size = 3074209, upload-time = "2025-08-06T00:30:45.509Z" },
+ { url = "https://files.pythonhosted.org/packages/21/0a/4c30e1eb77205565b854f5e4a82cf1f056214e4dc87f2918ebf83d47ae14/hf_xet-1.1.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:18b61bbae92d56ae731b92087c44efcac216071182c603fc535f8e29ec4b09b8", size = 3239602, upload-time = "2025-08-06T00:30:52.41Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/1e/fc7e9baf14152662ef0b35fa52a6e889f770a7ed14ac239de3c829ecb47e/hf_xet-1.1.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:713f2bff61b252f8523739969f247aa354ad8e6d869b8281e174e2ea1bb8d604", size = 3348184, upload-time = "2025-08-06T00:30:54.105Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/73/e354eae84ceff117ec3560141224724794828927fcc013c5b449bf0b8745/hf_xet-1.1.7-cp37-abi3-win_amd64.whl", hash = "sha256:2e356da7d284479ae0f1dea3cf5a2f74fdf925d6dca84ac4341930d892c7cb34", size = 2820008, upload-time = "2025-08-06T00:30:57.056Z" },
+]
+
+[[package]]
+name = "huggingface-hub"
+version = "0.34.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "filelock" },
+ { name = "fsspec" },
+ { name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" },
+ { name = "packaging" },
+ { name = "pyyaml" },
+ { name = "requests" },
+ { name = "tqdm" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/45/c9/bdbe19339f76d12985bc03572f330a01a93c04dffecaaea3061bdd7fb892/huggingface_hub-0.34.4.tar.gz", hash = "sha256:a4228daa6fb001be3f4f4bdaf9a0db00e1739235702848df00885c9b5742c85c", size = 459768, upload-time = "2025-08-08T09:14:52.365Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/39/7b/bb06b061991107cd8783f300adff3e7b7f284e330fd82f507f2a1417b11d/huggingface_hub-0.34.4-py3-none-any.whl", hash = "sha256:9b365d781739c93ff90c359844221beef048403f1bc1f1c123c191257c3c890a", size = 561452, upload-time = "2025-08-08T09:14:50.159Z" },
+]
+
+[[package]]
+name = "humanfriendly"
+version = "10.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pyreadline3", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/cc/3f/2c29224acb2e2df4d2046e4c73ee2662023c58ff5b113c4c1adac0886c43/humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc", size = 360702, upload-time = "2021-09-17T21:40:43.31Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477", size = 86794, upload-time = "2021-09-17T21:40:39.897Z" },
+]
+
+[[package]]
+name = "identify"
+version = "2.6.13"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/82/ca/ffbabe3635bb839aa36b3a893c91a9b0d368cb4d8073e03a12896970af82/identify-2.6.13.tar.gz", hash = "sha256:da8d6c828e773620e13bfa86ea601c5a5310ba4bcd65edf378198b56a1f9fb32", size = 99243, upload-time = "2025-08-09T19:35:00.6Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e7/ce/461b60a3ee109518c055953729bf9ed089a04db895d47e95444071dcdef2/identify-2.6.13-py2.py3-none-any.whl", hash = "sha256:60381139b3ae39447482ecc406944190f690d4a2997f2584062089848361b33b", size = 99153, upload-time = "2025-08-09T19:34:59.1Z" },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" },
+]
+
+[[package]]
+name = "iniconfig"
+version = "2.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" },
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markupsafe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
+]
+
+[[package]]
+name = "markdown"
+version = "3.8.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d7/c2/4ab49206c17f75cb08d6311171f2d65798988db4360c4d1485bd0eedd67c/markdown-3.8.2.tar.gz", hash = "sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45", size = 362071, upload-time = "2025-06-19T17:12:44.483Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/96/2b/34cc11786bc00d0f04d0f5fdc3a2b1ae0b6239eef72d3d345805f9ad92a1/markdown-3.8.2-py3-none-any.whl", hash = "sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24", size = 106827, upload-time = "2025-06-19T17:12:42.994Z" },
+]
+
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mdurl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
+]
+
+[[package]]
+name = "markupsafe"
+version = "3.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/90/d08277ce111dd22f77149fd1a5d4653eeb3b3eaacbdfcbae5afb2600eebd/MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8", size = 14357, upload-time = "2024-10-18T15:20:51.44Z" },
+ { url = "https://files.pythonhosted.org/packages/04/e1/6e2194baeae0bca1fae6629dc0cbbb968d4d941469cbab11a3872edff374/MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158", size = 12393, upload-time = "2024-10-18T15:20:52.426Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/69/35fa85a8ece0a437493dc61ce0bb6d459dcba482c34197e3efc829aa357f/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579", size = 21732, upload-time = "2024-10-18T15:20:53.578Z" },
+ { url = "https://files.pythonhosted.org/packages/22/35/137da042dfb4720b638d2937c38a9c2df83fe32d20e8c8f3185dbfef05f7/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d", size = 20866, upload-time = "2024-10-18T15:20:55.06Z" },
+ { url = "https://files.pythonhosted.org/packages/29/28/6d029a903727a1b62edb51863232152fd335d602def598dade38996887f0/MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb", size = 20964, upload-time = "2024-10-18T15:20:55.906Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/cd/07438f95f83e8bc028279909d9c9bd39e24149b0d60053a97b2bc4f8aa51/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b", size = 21977, upload-time = "2024-10-18T15:20:57.189Z" },
+ { url = "https://files.pythonhosted.org/packages/29/01/84b57395b4cc062f9c4c55ce0df7d3108ca32397299d9df00fedd9117d3d/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c", size = 21366, upload-time = "2024-10-18T15:20:58.235Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/6e/61ebf08d8940553afff20d1fb1ba7294b6f8d279df9fd0c0db911b4bbcfd/MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171", size = 21091, upload-time = "2024-10-18T15:20:59.235Z" },
+ { url = "https://files.pythonhosted.org/packages/11/23/ffbf53694e8c94ebd1e7e491de185124277964344733c45481f32ede2499/MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50", size = 15065, upload-time = "2024-10-18T15:21:00.307Z" },
+ { url = "https://files.pythonhosted.org/packages/44/06/e7175d06dd6e9172d4a69a72592cb3f7a996a9c396eee29082826449bbc3/MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a", size = 15514, upload-time = "2024-10-18T15:21:01.122Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/28/bbf83e3f76936960b850435576dd5e67034e200469571be53f69174a2dfd/MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d", size = 14353, upload-time = "2024-10-18T15:21:02.187Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/30/316d194b093cde57d448a4c3209f22e3046c5bb2fb0820b118292b334be7/MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93", size = 12392, upload-time = "2024-10-18T15:21:02.941Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/96/9cdafba8445d3a53cae530aaf83c38ec64c4d5427d975c974084af5bc5d2/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832", size = 23984, upload-time = "2024-10-18T15:21:03.953Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/a4/aefb044a2cd8d7334c8a47d3fb2c9f328ac48cb349468cc31c20b539305f/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84", size = 23120, upload-time = "2024-10-18T15:21:06.495Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/21/5e4851379f88f3fad1de30361db501300d4f07bcad047d3cb0449fc51f8c/MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca", size = 23032, upload-time = "2024-10-18T15:21:07.295Z" },
+ { url = "https://files.pythonhosted.org/packages/00/7b/e92c64e079b2d0d7ddf69899c98842f3f9a60a1ae72657c89ce2655c999d/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798", size = 24057, upload-time = "2024-10-18T15:21:08.073Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/ac/46f960ca323037caa0a10662ef97d0a4728e890334fc156b9f9e52bcc4ca/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e", size = 23359, upload-time = "2024-10-18T15:21:09.318Z" },
+ { url = "https://files.pythonhosted.org/packages/69/84/83439e16197337b8b14b6a5b9c2105fff81d42c2a7c5b58ac7b62ee2c3b1/MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4", size = 23306, upload-time = "2024-10-18T15:21:10.185Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/34/a15aa69f01e2181ed8d2b685c0d2f6655d5cca2c4db0ddea775e631918cd/MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d", size = 15094, upload-time = "2024-10-18T15:21:11.005Z" },
+ { url = "https://files.pythonhosted.org/packages/da/b8/3a3bd761922d416f3dc5d00bfbed11f66b1ab89a0c2b6e887240a30b0f6b/MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b", size = 15521, upload-time = "2024-10-18T15:21:12.911Z" },
+ { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" },
+ { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" },
+ { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" },
+ { url = "https://files.pythonhosted.org/packages/83/0e/67eb10a7ecc77a0c2bbe2b0235765b98d164d81600746914bebada795e97/MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd", size = 14274, upload-time = "2024-10-18T15:21:24.577Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/6d/9409f3684d3335375d04e5f05744dfe7e9f120062c9857df4ab490a1031a/MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430", size = 12352, upload-time = "2024-10-18T15:21:25.382Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/f5/6eadfcd3885ea85fe2a7c128315cc1bb7241e1987443d78c8fe712d03091/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094", size = 24122, upload-time = "2024-10-18T15:21:26.199Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/91/96cf928db8236f1bfab6ce15ad070dfdd02ed88261c2afafd4b43575e9e9/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396", size = 23085, upload-time = "2024-10-18T15:21:27.029Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/cf/c9d56af24d56ea04daae7ac0940232d31d5a8354f2b457c6d856b2057d69/MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79", size = 22978, upload-time = "2024-10-18T15:21:27.846Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/9f/8619835cd6a711d6272d62abb78c033bda638fdc54c4e7f4272cf1c0962b/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a", size = 24208, upload-time = "2024-10-18T15:21:28.744Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/bf/176950a1792b2cd2102b8ffeb5133e1ed984547b75db47c25a67d3359f77/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca", size = 23357, upload-time = "2024-10-18T15:21:29.545Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/4f/9a02c1d335caabe5c4efb90e1b6e8ee944aa245c1aaaab8e8a618987d816/MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c", size = 23344, upload-time = "2024-10-18T15:21:30.366Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/55/c271b57db36f748f0e04a759ace9f8f759ccf22b4960c270c78a394f58be/MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1", size = 15101, upload-time = "2024-10-18T15:21:31.207Z" },
+ { url = "https://files.pythonhosted.org/packages/29/88/07df22d2dd4df40aba9f3e402e6dc1b8ee86297dddbad4872bd5e7b0094f/MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f", size = 15603, upload-time = "2024-10-18T15:21:32.032Z" },
+ { url = "https://files.pythonhosted.org/packages/62/6a/8b89d24db2d32d433dffcd6a8779159da109842434f1dd2f6e71f32f738c/MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c", size = 14510, upload-time = "2024-10-18T15:21:33.625Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/06/a10f955f70a2e5a9bf78d11a161029d278eeacbd35ef806c3fd17b13060d/MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb", size = 12486, upload-time = "2024-10-18T15:21:34.611Z" },
+ { url = "https://files.pythonhosted.org/packages/34/cf/65d4a571869a1a9078198ca28f39fba5fbb910f952f9dbc5220afff9f5e6/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c", size = 25480, upload-time = "2024-10-18T15:21:35.398Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/e3/90e9651924c430b885468b56b3d597cabf6d72be4b24a0acd1fa0e12af67/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d", size = 23914, upload-time = "2024-10-18T15:21:36.231Z" },
+ { url = "https://files.pythonhosted.org/packages/66/8c/6c7cf61f95d63bb866db39085150df1f2a5bd3335298f14a66b48e92659c/MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe", size = 23796, upload-time = "2024-10-18T15:21:37.073Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/35/cbe9238ec3f47ac9a7c8b3df7a808e7cb50fe149dc7039f5f454b3fba218/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5", size = 25473, upload-time = "2024-10-18T15:21:37.932Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/32/7621a4382488aa283cc05e8984a9c219abad3bca087be9ec77e89939ded9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a", size = 24114, upload-time = "2024-10-18T15:21:39.799Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/80/0985960e4b89922cb5a0bac0ed39c5b96cbc1a536a99f30e8c220a996ed9/MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9", size = 24098, upload-time = "2024-10-18T15:21:40.813Z" },
+ { url = "https://files.pythonhosted.org/packages/82/78/fedb03c7d5380df2427038ec8d973587e90561b2d90cd472ce9254cf348b/MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6", size = 15208, upload-time = "2024-10-18T15:21:41.814Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/65/6079a46068dfceaeabb5dcad6d674f5f5c61a6fa5673746f42a9f4c233b3/MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f", size = 15739, upload-time = "2024-10-18T15:21:42.784Z" },
+]
+
+[[package]]
+name = "marshmallow"
+version = "4.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "backports-datetime-fromisoformat", marker = "python_full_version < '3.11'" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1e/ff/26df5a9f5ac57ccf693a5854916ab47243039d2aa9e0fe5f5a0331e7b74b/marshmallow-4.0.0.tar.gz", hash = "sha256:3b6e80aac299a7935cfb97ed01d1854fb90b5079430969af92118ea1b12a8d55", size = 220507, upload-time = "2025-04-17T02:25:54.925Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d6/26/6cc45d156f44dbe1d5696d9e54042e4dcaf7b946c0b86df6a97d29706f32/marshmallow-4.0.0-py3-none-any.whl", hash = "sha256:e7b0528337e9990fd64950f8a6b3a1baabed09ad17a0dfb844d701151f92d203", size = 48420, upload-time = "2025-04-17T02:25:53.375Z" },
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
+]
+
+[[package]]
+name = "mergedeep"
+version = "1.3.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/3a/41/580bb4006e3ed0361b8151a01d324fb03f420815446c7def45d02f74c270/mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8", size = 4661, upload-time = "2021-02-05T18:55:30.623Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2c/19/04f9b178c2d8a15b076c8b5140708fa6ffc5601fb6f1e975537072df5b2a/mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307", size = 6354, upload-time = "2021-02-05T18:55:29.583Z" },
+]
+
+[[package]]
+name = "mkdocs"
+version = "1.6.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+ { name = "ghp-import" },
+ { name = "jinja2" },
+ { name = "markdown" },
+ { name = "markupsafe" },
+ { name = "mergedeep" },
+ { name = "mkdocs-get-deps" },
+ { name = "packaging" },
+ { name = "pathspec" },
+ { name = "pyyaml" },
+ { name = "pyyaml-env-tag" },
+ { name = "watchdog" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bc/c6/bbd4f061bd16b378247f12953ffcb04786a618ce5e904b8c5a01a0309061/mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2", size = 3889159, upload-time = "2024-08-30T12:24:06.899Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/22/5b/dbc6a8cddc9cfa9c4971d59fb12bb8d42e161b7e7f8cc89e49137c5b279c/mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e", size = 3864451, upload-time = "2024-08-30T12:24:05.054Z" },
+]
+
+[[package]]
+name = "mkdocs-autorefs"
+version = "1.4.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markdown" },
+ { name = "markupsafe" },
+ { name = "mkdocs" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/47/0c/c9826f35b99c67fa3a7cddfa094c1a6c43fafde558c309c6e4403e5b37dc/mkdocs_autorefs-1.4.2.tar.gz", hash = "sha256:e2ebe1abd2b67d597ed19378c0fff84d73d1dbce411fce7a7cc6f161888b6749", size = 54961, upload-time = "2025-05-20T13:09:09.886Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/87/dc/fc063b78f4b769d1956319351704e23ebeba1e9e1d6a41b4b602325fd7e4/mkdocs_autorefs-1.4.2-py3-none-any.whl", hash = "sha256:83d6d777b66ec3c372a1aad4ae0cf77c243ba5bcda5bf0c6b8a2c5e7a3d89f13", size = 24969, upload-time = "2025-05-20T13:09:08.237Z" },
+]
+
+[[package]]
+name = "mkdocs-get-deps"
+version = "0.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mergedeep" },
+ { name = "platformdirs" },
+ { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/98/f5/ed29cd50067784976f25ed0ed6fcd3c2ce9eb90650aa3b2796ddf7b6870b/mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c", size = 10239, upload-time = "2023-11-20T17:51:09.981Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9f/d4/029f984e8d3f3b6b726bd33cafc473b75e9e44c0f7e80a5b29abc466bdea/mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134", size = 9521, upload-time = "2023-11-20T17:51:08.587Z" },
+]
+
+[[package]]
+name = "mkdocs-material"
+version = "9.6.16"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "babel" },
+ { name = "backrefs" },
+ { name = "colorama" },
+ { name = "jinja2" },
+ { name = "markdown" },
+ { name = "mkdocs" },
+ { name = "mkdocs-material-extensions" },
+ { name = "paginate" },
+ { name = "pygments" },
+ { name = "pymdown-extensions" },
+ { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/dd/84/aec27a468c5e8c27689c71b516fb5a0d10b8fca45b9ad2dd9d6e43bc4296/mkdocs_material-9.6.16.tar.gz", hash = "sha256:d07011df4a5c02ee0877496d9f1bfc986cfb93d964799b032dd99fe34c0e9d19", size = 4028828, upload-time = "2025-07-26T15:53:47.542Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/65/f4/90ad67125b4dd66e7884e4dbdfab82e3679eb92b751116f8bb25ccfe2f0c/mkdocs_material-9.6.16-py3-none-any.whl", hash = "sha256:8d1a1282b892fe1fdf77bfeb08c485ba3909dd743c9ba69a19a40f637c6ec18c", size = 9223743, upload-time = "2025-07-26T15:53:44.236Z" },
+]
+
+[[package]]
+name = "mkdocs-material-extensions"
+version = "1.3.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/79/9b/9b4c96d6593b2a541e1cb8b34899a6d021d208bb357042823d4d2cabdbe7/mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443", size = 11847, upload-time = "2023-11-22T19:09:45.208Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5b/54/662a4743aa81d9582ee9339d4ffa3c8fd40a4965e033d77b9da9774d3960/mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31", size = 8728, upload-time = "2023-11-22T19:09:43.465Z" },
+]
+
+[[package]]
+name = "mkdocstrings"
+version = "0.30.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jinja2" },
+ { name = "markdown" },
+ { name = "markupsafe" },
+ { name = "mkdocs" },
+ { name = "mkdocs-autorefs" },
+ { name = "pymdown-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e2/0a/7e4776217d4802009c8238c75c5345e23014a4706a8414a62c0498858183/mkdocstrings-0.30.0.tar.gz", hash = "sha256:5d8019b9c31ddacd780b6784ffcdd6f21c408f34c0bd1103b5351d609d5b4444", size = 106597, upload-time = "2025-07-22T23:48:45.998Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/de/b4/3c5eac68f31e124a55d255d318c7445840fa1be55e013f507556d6481913/mkdocstrings-0.30.0-py3-none-any.whl", hash = "sha256:ae9e4a0d8c1789697ac776f2e034e2ddd71054ae1cf2c2bb1433ccfd07c226f2", size = 36579, upload-time = "2025-07-22T23:48:44.152Z" },
+]
+
+[package.optional-dependencies]
+python = [
+ { name = "mkdocstrings-python" },
+]
+
+[[package]]
+name = "mkdocstrings-python"
+version = "1.16.12"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "griffe" },
+ { name = "mkdocs-autorefs" },
+ { name = "mkdocstrings" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bf/ed/b886f8c714fd7cccc39b79646b627dbea84cd95c46be43459ef46852caf0/mkdocstrings_python-1.16.12.tar.gz", hash = "sha256:9b9eaa066e0024342d433e332a41095c4e429937024945fea511afe58f63175d", size = 206065, upload-time = "2025-06-03T12:52:49.276Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3b/dd/a24ee3de56954bfafb6ede7cd63c2413bb842cc48eb45e41c43a05a33074/mkdocstrings_python-1.16.12-py3-none-any.whl", hash = "sha256:22ded3a63b3d823d57457a70ff9860d5a4de9e8b1e482876fc9baabaf6f5f374", size = 124287, upload-time = "2025-06-03T12:52:47.819Z" },
+]
+
+[[package]]
+name = "mpmath"
+version = "1.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" },
+]
+
+[[package]]
+name = "multidict"
+version = "6.6.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3d/2c/5dad12e82fbdf7470f29bff2171484bf07cb3b16ada60a6589af8f376440/multidict-6.6.3.tar.gz", hash = "sha256:798a9eb12dab0a6c2e29c1de6f3468af5cb2da6053a20dfa3344907eed0937cc", size = 101006, upload-time = "2025-06-30T15:53:46.929Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0b/67/414933982bce2efce7cbcb3169eaaf901e0f25baec69432b4874dfb1f297/multidict-6.6.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a2be5b7b35271f7fff1397204ba6708365e3d773579fe2a30625e16c4b4ce817", size = 77017, upload-time = "2025-06-30T15:50:58.931Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/fe/d8a3ee1fad37dc2ef4f75488b0d9d4f25bf204aad8306cbab63d97bff64a/multidict-6.6.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12f4581d2930840295c461764b9a65732ec01250b46c6b2c510d7ee68872b140", size = 44897, upload-time = "2025-06-30T15:51:00.999Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/e0/265d89af8c98240265d82b8cbcf35897f83b76cd59ee3ab3879050fd8c45/multidict-6.6.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dd7793bab517e706c9ed9d7310b06c8672fd0aeee5781bfad612f56b8e0f7d14", size = 44574, upload-time = "2025-06-30T15:51:02.449Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/05/6b759379f7e8e04ccc97cfb2a5dcc5cdbd44a97f072b2272dc51281e6a40/multidict-6.6.3-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:72d8815f2cd3cf3df0f83cac3f3ef801d908b2d90409ae28102e0553af85545a", size = 225729, upload-time = "2025-06-30T15:51:03.794Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/f5/8d5a15488edd9a91fa4aad97228d785df208ed6298580883aa3d9def1959/multidict-6.6.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:531e331a2ee53543ab32b16334e2deb26f4e6b9b28e41f8e0c87e99a6c8e2d69", size = 242515, upload-time = "2025-06-30T15:51:05.002Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/b5/a8f317d47d0ac5bb746d6d8325885c8967c2a8ce0bb57be5399e3642cccb/multidict-6.6.3-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:42ca5aa9329a63be8dc49040f63817d1ac980e02eeddba763a9ae5b4027b9c9c", size = 222224, upload-time = "2025-06-30T15:51:06.148Z" },
+ { url = "https://files.pythonhosted.org/packages/76/88/18b2a0d5e80515fa22716556061189c2853ecf2aa2133081ebbe85ebea38/multidict-6.6.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:208b9b9757060b9faa6f11ab4bc52846e4f3c2fb8b14d5680c8aac80af3dc751", size = 253124, upload-time = "2025-06-30T15:51:07.375Z" },
+ { url = "https://files.pythonhosted.org/packages/62/bf/ebfcfd6b55a1b05ef16d0775ae34c0fe15e8dab570d69ca9941073b969e7/multidict-6.6.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:acf6b97bd0884891af6a8b43d0f586ab2fcf8e717cbd47ab4bdddc09e20652d8", size = 251529, upload-time = "2025-06-30T15:51:08.691Z" },
+ { url = "https://files.pythonhosted.org/packages/44/11/780615a98fd3775fc309d0234d563941af69ade2df0bb82c91dda6ddaea1/multidict-6.6.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:68e9e12ed00e2089725669bdc88602b0b6f8d23c0c95e52b95f0bc69f7fe9b55", size = 241627, upload-time = "2025-06-30T15:51:10.605Z" },
+ { url = "https://files.pythonhosted.org/packages/28/3d/35f33045e21034b388686213752cabc3a1b9d03e20969e6fa8f1b1d82db1/multidict-6.6.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:05db2f66c9addb10cfa226e1acb363450fab2ff8a6df73c622fefe2f5af6d4e7", size = 239351, upload-time = "2025-06-30T15:51:12.18Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/cc/ff84c03b95b430015d2166d9aae775a3985d757b94f6635010d0038d9241/multidict-6.6.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:0db58da8eafb514db832a1b44f8fa7906fdd102f7d982025f816a93ba45e3dcb", size = 233429, upload-time = "2025-06-30T15:51:13.533Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/f0/8cd49a0b37bdea673a4b793c2093f2f4ba8e7c9d6d7c9bd672fd6d38cd11/multidict-6.6.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:14117a41c8fdb3ee19c743b1c027da0736fdb79584d61a766da53d399b71176c", size = 243094, upload-time = "2025-06-30T15:51:14.815Z" },
+ { url = "https://files.pythonhosted.org/packages/96/19/5d9a0cfdafe65d82b616a45ae950975820289069f885328e8185e64283c2/multidict-6.6.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:877443eaaabcd0b74ff32ebeed6f6176c71850feb7d6a1d2db65945256ea535c", size = 248957, upload-time = "2025-06-30T15:51:16.076Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/dc/c90066151da87d1e489f147b9b4327927241e65f1876702fafec6729c014/multidict-6.6.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:70b72e749a4f6e7ed8fb334fa8d8496384840319512746a5f42fa0aec79f4d61", size = 243590, upload-time = "2025-06-30T15:51:17.413Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/39/458afb0cccbb0ee9164365273be3e039efddcfcb94ef35924b7dbdb05db0/multidict-6.6.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:43571f785b86afd02b3855c5ac8e86ec921b760298d6f82ff2a61daf5a35330b", size = 237487, upload-time = "2025-06-30T15:51:19.039Z" },
+ { url = "https://files.pythonhosted.org/packages/35/38/0016adac3990426610a081787011177e661875546b434f50a26319dc8372/multidict-6.6.3-cp310-cp310-win32.whl", hash = "sha256:20c5a0c3c13a15fd5ea86c42311859f970070e4e24de5a550e99d7c271d76318", size = 41390, upload-time = "2025-06-30T15:51:20.362Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/d2/17897a8f3f2c5363d969b4c635aa40375fe1f09168dc09a7826780bfb2a4/multidict-6.6.3-cp310-cp310-win_amd64.whl", hash = "sha256:ab0a34a007704c625e25a9116c6770b4d3617a071c8a7c30cd338dfbadfe6485", size = 45954, upload-time = "2025-06-30T15:51:21.383Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/5f/d4a717c1e457fe44072e33fa400d2b93eb0f2819c4d669381f925b7cba1f/multidict-6.6.3-cp310-cp310-win_arm64.whl", hash = "sha256:769841d70ca8bdd140a715746199fc6473414bd02efd678d75681d2d6a8986c5", size = 42981, upload-time = "2025-06-30T15:51:22.809Z" },
+ { url = "https://files.pythonhosted.org/packages/08/f0/1a39863ced51f639c81a5463fbfa9eb4df59c20d1a8769ab9ef4ca57ae04/multidict-6.6.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:18f4eba0cbac3546b8ae31e0bbc55b02c801ae3cbaf80c247fcdd89b456ff58c", size = 76445, upload-time = "2025-06-30T15:51:24.01Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/0e/a7cfa451c7b0365cd844e90b41e21fab32edaa1e42fc0c9f68461ce44ed7/multidict-6.6.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef43b5dd842382329e4797c46f10748d8c2b6e0614f46b4afe4aee9ac33159df", size = 44610, upload-time = "2025-06-30T15:51:25.158Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/bb/a14a4efc5ee748cc1904b0748be278c31b9295ce5f4d2ef66526f410b94d/multidict-6.6.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf9bd1fd5eec01494e0f2e8e446a74a85d5e49afb63d75a9934e4a5423dba21d", size = 44267, upload-time = "2025-06-30T15:51:26.326Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/f8/410677d563c2d55e063ef74fe578f9d53fe6b0a51649597a5861f83ffa15/multidict-6.6.3-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:5bd8d6f793a787153956cd35e24f60485bf0651c238e207b9a54f7458b16d539", size = 230004, upload-time = "2025-06-30T15:51:27.491Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/df/2b787f80059314a98e1ec6a4cc7576244986df3e56b3c755e6fc7c99e038/multidict-6.6.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bf99b4daf908c73856bd87ee0a2499c3c9a3d19bb04b9c6025e66af3fd07462", size = 247196, upload-time = "2025-06-30T15:51:28.762Z" },
+ { url = "https://files.pythonhosted.org/packages/05/f2/f9117089151b9a8ab39f9019620d10d9718eec2ac89e7ca9d30f3ec78e96/multidict-6.6.3-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b9e59946b49dafaf990fd9c17ceafa62976e8471a14952163d10a7a630413a9", size = 225337, upload-time = "2025-06-30T15:51:30.025Z" },
+ { url = "https://files.pythonhosted.org/packages/93/2d/7115300ec5b699faa152c56799b089a53ed69e399c3c2d528251f0aeda1a/multidict-6.6.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:e2db616467070d0533832d204c54eea6836a5e628f2cb1e6dfd8cd6ba7277cb7", size = 257079, upload-time = "2025-06-30T15:51:31.716Z" },
+ { url = "https://files.pythonhosted.org/packages/15/ea/ff4bab367623e39c20d3b07637225c7688d79e4f3cc1f3b9f89867677f9a/multidict-6.6.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7394888236621f61dcdd25189b2768ae5cc280f041029a5bcf1122ac63df79f9", size = 255461, upload-time = "2025-06-30T15:51:33.029Z" },
+ { url = "https://files.pythonhosted.org/packages/74/07/2c9246cda322dfe08be85f1b8739646f2c4c5113a1422d7a407763422ec4/multidict-6.6.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f114d8478733ca7388e7c7e0ab34b72547476b97009d643644ac33d4d3fe1821", size = 246611, upload-time = "2025-06-30T15:51:34.47Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/62/279c13d584207d5697a752a66ffc9bb19355a95f7659140cb1b3cf82180e/multidict-6.6.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cdf22e4db76d323bcdc733514bf732e9fb349707c98d341d40ebcc6e9318ef3d", size = 243102, upload-time = "2025-06-30T15:51:36.525Z" },
+ { url = "https://files.pythonhosted.org/packages/69/cc/e06636f48c6d51e724a8bc8d9e1db5f136fe1df066d7cafe37ef4000f86a/multidict-6.6.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e995a34c3d44ab511bfc11aa26869b9d66c2d8c799fa0e74b28a473a692532d6", size = 238693, upload-time = "2025-06-30T15:51:38.278Z" },
+ { url = "https://files.pythonhosted.org/packages/89/a4/66c9d8fb9acf3b226cdd468ed009537ac65b520aebdc1703dd6908b19d33/multidict-6.6.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:766a4a5996f54361d8d5a9050140aa5362fe48ce51c755a50c0bc3706460c430", size = 246582, upload-time = "2025-06-30T15:51:39.709Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/01/c69e0317be556e46257826d5449feb4e6aa0d18573e567a48a2c14156f1f/multidict-6.6.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:3893a0d7d28a7fe6ca7a1f760593bc13038d1d35daf52199d431b61d2660602b", size = 253355, upload-time = "2025-06-30T15:51:41.013Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/da/9cc1da0299762d20e626fe0042e71b5694f9f72d7d3f9678397cbaa71b2b/multidict-6.6.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:934796c81ea996e61914ba58064920d6cad5d99140ac3167901eb932150e2e56", size = 247774, upload-time = "2025-06-30T15:51:42.291Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/91/b22756afec99cc31105ddd4a52f95ab32b1a4a58f4d417979c570c4a922e/multidict-6.6.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9ed948328aec2072bc00f05d961ceadfd3e9bfc2966c1319aeaf7b7c21219183", size = 242275, upload-time = "2025-06-30T15:51:43.642Z" },
+ { url = "https://files.pythonhosted.org/packages/be/f1/adcc185b878036a20399d5be5228f3cbe7f823d78985d101d425af35c800/multidict-6.6.3-cp311-cp311-win32.whl", hash = "sha256:9f5b28c074c76afc3e4c610c488e3493976fe0e596dd3db6c8ddfbb0134dcac5", size = 41290, upload-time = "2025-06-30T15:51:45.264Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/d4/27652c1c6526ea6b4f5ddd397e93f4232ff5de42bea71d339bc6a6cc497f/multidict-6.6.3-cp311-cp311-win_amd64.whl", hash = "sha256:bc7f6fbc61b1c16050a389c630da0b32fc6d4a3d191394ab78972bf5edc568c2", size = 45942, upload-time = "2025-06-30T15:51:46.377Z" },
+ { url = "https://files.pythonhosted.org/packages/16/18/23f4932019804e56d3c2413e237f866444b774b0263bcb81df2fdecaf593/multidict-6.6.3-cp311-cp311-win_arm64.whl", hash = "sha256:d4e47d8faffaae822fb5cba20937c048d4f734f43572e7079298a6c39fb172cb", size = 42880, upload-time = "2025-06-30T15:51:47.561Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/a0/6b57988ea102da0623ea814160ed78d45a2645e4bbb499c2896d12833a70/multidict-6.6.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:056bebbeda16b2e38642d75e9e5310c484b7c24e3841dc0fb943206a72ec89d6", size = 76514, upload-time = "2025-06-30T15:51:48.728Z" },
+ { url = "https://files.pythonhosted.org/packages/07/7a/d1e92665b0850c6c0508f101f9cf0410c1afa24973e1115fe9c6a185ebf7/multidict-6.6.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e5f481cccb3c5c5e5de5d00b5141dc589c1047e60d07e85bbd7dea3d4580d63f", size = 45394, upload-time = "2025-06-30T15:51:49.986Z" },
+ { url = "https://files.pythonhosted.org/packages/52/6f/dd104490e01be6ef8bf9573705d8572f8c2d2c561f06e3826b081d9e6591/multidict-6.6.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:10bea2ee839a759ee368b5a6e47787f399b41e70cf0c20d90dfaf4158dfb4e55", size = 43590, upload-time = "2025-06-30T15:51:51.331Z" },
+ { url = "https://files.pythonhosted.org/packages/44/fe/06e0e01b1b0611e6581b7fd5a85b43dacc08b6cea3034f902f383b0873e5/multidict-6.6.3-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:2334cfb0fa9549d6ce2c21af2bfbcd3ac4ec3646b1b1581c88e3e2b1779ec92b", size = 237292, upload-time = "2025-06-30T15:51:52.584Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/71/4f0e558fb77696b89c233c1ee2d92f3e1d5459070a0e89153c9e9e804186/multidict-6.6.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8fee016722550a2276ca2cb5bb624480e0ed2bd49125b2b73b7010b9090e888", size = 258385, upload-time = "2025-06-30T15:51:53.913Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/25/cca0e68228addad24903801ed1ab42e21307a1b4b6dd2cf63da5d3ae082a/multidict-6.6.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5511cb35f5c50a2db21047c875eb42f308c5583edf96bd8ebf7d770a9d68f6d", size = 242328, upload-time = "2025-06-30T15:51:55.672Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/a3/46f2d420d86bbcb8fe660b26a10a219871a0fbf4d43cb846a4031533f3e0/multidict-6.6.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:712b348f7f449948e0a6c4564a21c7db965af900973a67db432d724619b3c680", size = 268057, upload-time = "2025-06-30T15:51:57.037Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/73/1c743542fe00794a2ec7466abd3f312ccb8fad8dff9f36d42e18fb1ec33e/multidict-6.6.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e4e15d2138ee2694e038e33b7c3da70e6b0ad8868b9f8094a72e1414aeda9c1a", size = 269341, upload-time = "2025-06-30T15:51:59.111Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/11/6ec9dcbe2264b92778eeb85407d1df18812248bf3506a5a1754bc035db0c/multidict-6.6.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8df25594989aebff8a130f7899fa03cbfcc5d2b5f4a461cf2518236fe6f15961", size = 256081, upload-time = "2025-06-30T15:52:00.533Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/2b/631b1e2afeb5f1696846d747d36cda075bfdc0bc7245d6ba5c319278d6c4/multidict-6.6.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:159ca68bfd284a8860f8d8112cf0521113bffd9c17568579e4d13d1f1dc76b65", size = 253581, upload-time = "2025-06-30T15:52:02.43Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/0e/7e3b93f79efeb6111d3bf9a1a69e555ba1d07ad1c11bceb56b7310d0d7ee/multidict-6.6.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e098c17856a8c9ade81b4810888c5ad1914099657226283cab3062c0540b0643", size = 250750, upload-time = "2025-06-30T15:52:04.26Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/9e/086846c1d6601948e7de556ee464a2d4c85e33883e749f46b9547d7b0704/multidict-6.6.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:67c92ed673049dec52d7ed39f8cf9ebbadf5032c774058b4406d18c8f8fe7063", size = 251548, upload-time = "2025-06-30T15:52:06.002Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/7b/86ec260118e522f1a31550e87b23542294880c97cfbf6fb18cc67b044c66/multidict-6.6.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:bd0578596e3a835ef451784053cfd327d607fc39ea1a14812139339a18a0dbc3", size = 262718, upload-time = "2025-06-30T15:52:07.707Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/bd/22ce8f47abb0be04692c9fc4638508b8340987b18691aa7775d927b73f72/multidict-6.6.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:346055630a2df2115cd23ae271910b4cae40f4e336773550dca4889b12916e75", size = 259603, upload-time = "2025-06-30T15:52:09.58Z" },
+ { url = "https://files.pythonhosted.org/packages/07/9c/91b7ac1691be95cd1f4a26e36a74b97cda6aa9820632d31aab4410f46ebd/multidict-6.6.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:555ff55a359302b79de97e0468e9ee80637b0de1fce77721639f7cd9440b3a10", size = 251351, upload-time = "2025-06-30T15:52:10.947Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/5c/4d7adc739884f7a9fbe00d1eac8c034023ef8bad71f2ebe12823ca2e3649/multidict-6.6.3-cp312-cp312-win32.whl", hash = "sha256:73ab034fb8d58ff85c2bcbadc470efc3fafeea8affcf8722855fb94557f14cc5", size = 41860, upload-time = "2025-06-30T15:52:12.334Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/a3/0fbc7afdf7cb1aa12a086b02959307848eb6bcc8f66fcb66c0cb57e2a2c1/multidict-6.6.3-cp312-cp312-win_amd64.whl", hash = "sha256:04cbcce84f63b9af41bad04a54d4cc4e60e90c35b9e6ccb130be2d75b71f8c17", size = 45982, upload-time = "2025-06-30T15:52:13.6Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/95/8c825bd70ff9b02462dc18d1295dd08d3e9e4eb66856d292ffa62cfe1920/multidict-6.6.3-cp312-cp312-win_arm64.whl", hash = "sha256:0f1130b896ecb52d2a1e615260f3ea2af55fa7dc3d7c3003ba0c3121a759b18b", size = 43210, upload-time = "2025-06-30T15:52:14.893Z" },
+ { url = "https://files.pythonhosted.org/packages/52/1d/0bebcbbb4f000751fbd09957257903d6e002943fc668d841a4cf2fb7f872/multidict-6.6.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:540d3c06d48507357a7d57721e5094b4f7093399a0106c211f33540fdc374d55", size = 75843, upload-time = "2025-06-30T15:52:16.155Z" },
+ { url = "https://files.pythonhosted.org/packages/07/8f/cbe241b0434cfe257f65c2b1bcf9e8d5fb52bc708c5061fb29b0fed22bdf/multidict-6.6.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9c19cea2a690f04247d43f366d03e4eb110a0dc4cd1bbeee4d445435428ed35b", size = 45053, upload-time = "2025-06-30T15:52:17.429Z" },
+ { url = "https://files.pythonhosted.org/packages/32/d2/0b3b23f9dbad5b270b22a3ac3ea73ed0a50ef2d9a390447061178ed6bdb8/multidict-6.6.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7af039820cfd00effec86bda5d8debef711a3e86a1d3772e85bea0f243a4bd65", size = 43273, upload-time = "2025-06-30T15:52:19.346Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/fe/6eb68927e823999e3683bc49678eb20374ba9615097d085298fd5b386564/multidict-6.6.3-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:500b84f51654fdc3944e936f2922114349bf8fdcac77c3092b03449f0e5bc2b3", size = 237124, upload-time = "2025-06-30T15:52:20.773Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/ab/320d8507e7726c460cb77117848b3834ea0d59e769f36fdae495f7669929/multidict-6.6.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3fc723ab8a5c5ed6c50418e9bfcd8e6dceba6c271cee6728a10a4ed8561520c", size = 256892, upload-time = "2025-06-30T15:52:22.242Z" },
+ { url = "https://files.pythonhosted.org/packages/76/60/38ee422db515ac69834e60142a1a69111ac96026e76e8e9aa347fd2e4591/multidict-6.6.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:94c47ea3ade005b5976789baaed66d4de4480d0a0bf31cef6edaa41c1e7b56a6", size = 240547, upload-time = "2025-06-30T15:52:23.736Z" },
+ { url = "https://files.pythonhosted.org/packages/27/fb/905224fde2dff042b030c27ad95a7ae744325cf54b890b443d30a789b80e/multidict-6.6.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:dbc7cf464cc6d67e83e136c9f55726da3a30176f020a36ead246eceed87f1cd8", size = 266223, upload-time = "2025-06-30T15:52:25.185Z" },
+ { url = "https://files.pythonhosted.org/packages/76/35/dc38ab361051beae08d1a53965e3e1a418752fc5be4d3fb983c5582d8784/multidict-6.6.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:900eb9f9da25ada070f8ee4a23f884e0ee66fe4e1a38c3af644256a508ad81ca", size = 267262, upload-time = "2025-06-30T15:52:26.969Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/a3/0a485b7f36e422421b17e2bbb5a81c1af10eac1d4476f2ff92927c730479/multidict-6.6.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7c6df517cf177da5d47ab15407143a89cd1a23f8b335f3a28d57e8b0a3dbb884", size = 254345, upload-time = "2025-06-30T15:52:28.467Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/59/bcdd52c1dab7c0e0d75ff19cac751fbd5f850d1fc39172ce809a74aa9ea4/multidict-6.6.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4ef421045f13879e21c994b36e728d8e7d126c91a64b9185810ab51d474f27e7", size = 252248, upload-time = "2025-06-30T15:52:29.938Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/a4/2d96aaa6eae8067ce108d4acee6f45ced5728beda55c0f02ae1072c730d1/multidict-6.6.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:6c1e61bb4f80895c081790b6b09fa49e13566df8fbff817da3f85b3a8192e36b", size = 250115, upload-time = "2025-06-30T15:52:31.416Z" },
+ { url = "https://files.pythonhosted.org/packages/25/d2/ed9f847fa5c7d0677d4f02ea2c163d5e48573de3f57bacf5670e43a5ffaa/multidict-6.6.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e5e8523bb12d7623cd8300dbd91b9e439a46a028cd078ca695eb66ba31adee3c", size = 249649, upload-time = "2025-06-30T15:52:32.996Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/af/9155850372563fc550803d3f25373308aa70f59b52cff25854086ecb4a79/multidict-6.6.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ef58340cc896219e4e653dade08fea5c55c6df41bcc68122e3be3e9d873d9a7b", size = 261203, upload-time = "2025-06-30T15:52:34.521Z" },
+ { url = "https://files.pythonhosted.org/packages/36/2f/c6a728f699896252cf309769089568a33c6439626648843f78743660709d/multidict-6.6.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fc9dc435ec8699e7b602b94fe0cd4703e69273a01cbc34409af29e7820f777f1", size = 258051, upload-time = "2025-06-30T15:52:35.999Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/60/689880776d6b18fa2b70f6cc74ff87dd6c6b9b47bd9cf74c16fecfaa6ad9/multidict-6.6.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9e864486ef4ab07db5e9cb997bad2b681514158d6954dd1958dfb163b83d53e6", size = 249601, upload-time = "2025-06-30T15:52:37.473Z" },
+ { url = "https://files.pythonhosted.org/packages/75/5e/325b11f2222a549019cf2ef879c1f81f94a0d40ace3ef55cf529915ba6cc/multidict-6.6.3-cp313-cp313-win32.whl", hash = "sha256:5633a82fba8e841bc5c5c06b16e21529573cd654f67fd833650a215520a6210e", size = 41683, upload-time = "2025-06-30T15:52:38.927Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/ad/cf46e73f5d6e3c775cabd2a05976547f3f18b39bee06260369a42501f053/multidict-6.6.3-cp313-cp313-win_amd64.whl", hash = "sha256:e93089c1570a4ad54c3714a12c2cef549dc9d58e97bcded193d928649cab78e9", size = 45811, upload-time = "2025-06-30T15:52:40.207Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/c9/2e3fe950db28fb7c62e1a5f46e1e38759b072e2089209bc033c2798bb5ec/multidict-6.6.3-cp313-cp313-win_arm64.whl", hash = "sha256:c60b401f192e79caec61f166da9c924e9f8bc65548d4246842df91651e83d600", size = 43056, upload-time = "2025-06-30T15:52:41.575Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/58/aaf8114cf34966e084a8cc9517771288adb53465188843d5a19862cb6dc3/multidict-6.6.3-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:02fd8f32d403a6ff13864b0851f1f523d4c988051eea0471d4f1fd8010f11134", size = 82811, upload-time = "2025-06-30T15:52:43.281Z" },
+ { url = "https://files.pythonhosted.org/packages/71/af/5402e7b58a1f5b987a07ad98f2501fdba2a4f4b4c30cf114e3ce8db64c87/multidict-6.6.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:f3aa090106b1543f3f87b2041eef3c156c8da2aed90c63a2fbed62d875c49c37", size = 48304, upload-time = "2025-06-30T15:52:45.026Z" },
+ { url = "https://files.pythonhosted.org/packages/39/65/ab3c8cafe21adb45b24a50266fd747147dec7847425bc2a0f6934b3ae9ce/multidict-6.6.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e924fb978615a5e33ff644cc42e6aa241effcf4f3322c09d4f8cebde95aff5f8", size = 46775, upload-time = "2025-06-30T15:52:46.459Z" },
+ { url = "https://files.pythonhosted.org/packages/49/ba/9fcc1b332f67cc0c0c8079e263bfab6660f87fe4e28a35921771ff3eea0d/multidict-6.6.3-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:b9fe5a0e57c6dbd0e2ce81ca66272282c32cd11d31658ee9553849d91289e1c1", size = 229773, upload-time = "2025-06-30T15:52:47.88Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/14/0145a251f555f7c754ce2dcbcd012939bbd1f34f066fa5d28a50e722a054/multidict-6.6.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b24576f208793ebae00280c59927c3b7c2a3b1655e443a25f753c4611bc1c373", size = 250083, upload-time = "2025-06-30T15:52:49.366Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/d4/d5c0bd2bbb173b586c249a151a26d2fb3ec7d53c96e42091c9fef4e1f10c/multidict-6.6.3-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:135631cb6c58eac37d7ac0df380294fecdc026b28837fa07c02e459c7fb9c54e", size = 228980, upload-time = "2025-06-30T15:52:50.903Z" },
+ { url = "https://files.pythonhosted.org/packages/21/32/c9a2d8444a50ec48c4733ccc67254100c10e1c8ae8e40c7a2d2183b59b97/multidict-6.6.3-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:274d416b0df887aef98f19f21578653982cfb8a05b4e187d4a17103322eeaf8f", size = 257776, upload-time = "2025-06-30T15:52:52.764Z" },
+ { url = "https://files.pythonhosted.org/packages/68/d0/14fa1699f4ef629eae08ad6201c6b476098f5efb051b296f4c26be7a9fdf/multidict-6.6.3-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e252017a817fad7ce05cafbe5711ed40faeb580e63b16755a3a24e66fa1d87c0", size = 256882, upload-time = "2025-06-30T15:52:54.596Z" },
+ { url = "https://files.pythonhosted.org/packages/da/88/84a27570fbe303c65607d517a5f147cd2fc046c2d1da02b84b17b9bdc2aa/multidict-6.6.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e4cc8d848cd4fe1cdee28c13ea79ab0ed37fc2e89dd77bac86a2e7959a8c3bc", size = 247816, upload-time = "2025-06-30T15:52:56.175Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/60/dca352a0c999ce96a5d8b8ee0b2b9f729dcad2e0b0c195f8286269a2074c/multidict-6.6.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9e236a7094b9c4c1b7585f6b9cca34b9d833cf079f7e4c49e6a4a6ec9bfdc68f", size = 245341, upload-time = "2025-06-30T15:52:57.752Z" },
+ { url = "https://files.pythonhosted.org/packages/50/ef/433fa3ed06028f03946f3993223dada70fb700f763f70c00079533c34578/multidict-6.6.3-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:e0cb0ab69915c55627c933f0b555a943d98ba71b4d1c57bc0d0a66e2567c7471", size = 235854, upload-time = "2025-06-30T15:52:59.74Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/1f/487612ab56fbe35715320905215a57fede20de7db40a261759690dc80471/multidict-6.6.3-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:81ef2f64593aba09c5212a3d0f8c906a0d38d710a011f2f42759704d4557d3f2", size = 243432, upload-time = "2025-06-30T15:53:01.602Z" },
+ { url = "https://files.pythonhosted.org/packages/da/6f/ce8b79de16cd885c6f9052c96a3671373d00c59b3ee635ea93e6e81b8ccf/multidict-6.6.3-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:b9cbc60010de3562545fa198bfc6d3825df430ea96d2cc509c39bd71e2e7d648", size = 252731, upload-time = "2025-06-30T15:53:03.517Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/fe/a2514a6aba78e5abefa1624ca85ae18f542d95ac5cde2e3815a9fbf369aa/multidict-6.6.3-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:70d974eaaa37211390cd02ef93b7e938de564bbffa866f0b08d07e5e65da783d", size = 247086, upload-time = "2025-06-30T15:53:05.48Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/22/b788718d63bb3cce752d107a57c85fcd1a212c6c778628567c9713f9345a/multidict-6.6.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:3713303e4a6663c6d01d648a68f2848701001f3390a030edaaf3fc949c90bf7c", size = 243338, upload-time = "2025-06-30T15:53:07.522Z" },
+ { url = "https://files.pythonhosted.org/packages/22/d6/fdb3d0670819f2228f3f7d9af613d5e652c15d170c83e5f1c94fbc55a25b/multidict-6.6.3-cp313-cp313t-win32.whl", hash = "sha256:639ecc9fe7cd73f2495f62c213e964843826f44505a3e5d82805aa85cac6f89e", size = 47812, upload-time = "2025-06-30T15:53:09.263Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/d6/a9d2c808f2c489ad199723197419207ecbfbc1776f6e155e1ecea9c883aa/multidict-6.6.3-cp313-cp313t-win_amd64.whl", hash = "sha256:9f97e181f344a0ef3881b573d31de8542cc0dbc559ec68c8f8b5ce2c2e91646d", size = 53011, upload-time = "2025-06-30T15:53:11.038Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/40/b68001cba8188dd267590a111f9661b6256debc327137667e832bf5d66e8/multidict-6.6.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ce8b7693da41a3c4fde5871c738a81490cea5496c671d74374c8ab889e1834fb", size = 45254, upload-time = "2025-06-30T15:53:12.421Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/30/9aec301e9772b098c1f5c0ca0279237c9766d94b97802e9888010c64b0ed/multidict-6.6.3-py3-none-any.whl", hash = "sha256:8db10f29c7541fc5da4defd8cd697e1ca429db743fa716325f236079b96f775a", size = 12313, upload-time = "2025-06-30T15:53:45.437Z" },
+]
+
+[[package]]
+name = "mypy"
+version = "1.17.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mypy-extensions" },
+ { name = "pathspec" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8e/22/ea637422dedf0bf36f3ef238eab4e455e2a0dcc3082b5cc067615347ab8e/mypy-1.17.1.tar.gz", hash = "sha256:25e01ec741ab5bb3eec8ba9cdb0f769230368a22c959c4937360efb89b7e9f01", size = 3352570, upload-time = "2025-07-31T07:54:19.204Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/77/a9/3d7aa83955617cdf02f94e50aab5c830d205cfa4320cf124ff64acce3a8e/mypy-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3fbe6d5555bf608c47203baa3e72dbc6ec9965b3d7c318aa9a4ca76f465bd972", size = 11003299, upload-time = "2025-07-31T07:54:06.425Z" },
+ { url = "https://files.pythonhosted.org/packages/83/e8/72e62ff837dd5caaac2b4a5c07ce769c8e808a00a65e5d8f94ea9c6f20ab/mypy-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80ef5c058b7bce08c83cac668158cb7edea692e458d21098c7d3bce35a5d43e7", size = 10125451, upload-time = "2025-07-31T07:53:52.974Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/10/f3f3543f6448db11881776f26a0ed079865926b0c841818ee22de2c6bbab/mypy-1.17.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a580f8a70c69e4a75587bd925d298434057fe2a428faaf927ffe6e4b9a98df", size = 11916211, upload-time = "2025-07-31T07:53:18.879Z" },
+ { url = "https://files.pythonhosted.org/packages/06/bf/63e83ed551282d67bb3f7fea2cd5561b08d2bb6eb287c096539feb5ddbc5/mypy-1.17.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dd86bb649299f09d987a2eebb4d52d10603224500792e1bee18303bbcc1ce390", size = 12652687, upload-time = "2025-07-31T07:53:30.544Z" },
+ { url = "https://files.pythonhosted.org/packages/69/66/68f2eeef11facf597143e85b694a161868b3b006a5fbad50e09ea117ef24/mypy-1.17.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:a76906f26bd8d51ea9504966a9c25419f2e668f012e0bdf3da4ea1526c534d94", size = 12896322, upload-time = "2025-07-31T07:53:50.74Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/87/8e3e9c2c8bd0d7e071a89c71be28ad088aaecbadf0454f46a540bda7bca6/mypy-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:e79311f2d904ccb59787477b7bd5d26f3347789c06fcd7656fa500875290264b", size = 9507962, upload-time = "2025-07-31T07:53:08.431Z" },
+ { url = "https://files.pythonhosted.org/packages/46/cf/eadc80c4e0a70db1c08921dcc220357ba8ab2faecb4392e3cebeb10edbfa/mypy-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad37544be07c5d7fba814eb370e006df58fed8ad1ef33ed1649cb1889ba6ff58", size = 10921009, upload-time = "2025-07-31T07:53:23.037Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/c1/c869d8c067829ad30d9bdae051046561552516cfb3a14f7f0347b7d973ee/mypy-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:064e2ff508e5464b4bd807a7c1625bc5047c5022b85c70f030680e18f37273a5", size = 10047482, upload-time = "2025-07-31T07:53:26.151Z" },
+ { url = "https://files.pythonhosted.org/packages/98/b9/803672bab3fe03cee2e14786ca056efda4bb511ea02dadcedde6176d06d0/mypy-1.17.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:70401bbabd2fa1aa7c43bb358f54037baf0586f41e83b0ae67dd0534fc64edfd", size = 11832883, upload-time = "2025-07-31T07:53:47.948Z" },
+ { url = "https://files.pythonhosted.org/packages/88/fb/fcdac695beca66800918c18697b48833a9a6701de288452b6715a98cfee1/mypy-1.17.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e92bdc656b7757c438660f775f872a669b8ff374edc4d18277d86b63edba6b8b", size = 12566215, upload-time = "2025-07-31T07:54:04.031Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/37/a932da3d3dace99ee8eb2043b6ab03b6768c36eb29a02f98f46c18c0da0e/mypy-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c1fdf4abb29ed1cb091cf432979e162c208a5ac676ce35010373ff29247bcad5", size = 12751956, upload-time = "2025-07-31T07:53:36.263Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/cf/6438a429e0f2f5cab8bc83e53dbebfa666476f40ee322e13cac5e64b79e7/mypy-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:ff2933428516ab63f961644bc49bc4cbe42bbffb2cd3b71cc7277c07d16b1a8b", size = 9507307, upload-time = "2025-07-31T07:53:59.734Z" },
+ { url = "https://files.pythonhosted.org/packages/17/a2/7034d0d61af8098ec47902108553122baa0f438df8a713be860f7407c9e6/mypy-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:69e83ea6553a3ba79c08c6e15dbd9bfa912ec1e493bf75489ef93beb65209aeb", size = 11086295, upload-time = "2025-07-31T07:53:28.124Z" },
+ { url = "https://files.pythonhosted.org/packages/14/1f/19e7e44b594d4b12f6ba8064dbe136505cec813549ca3e5191e40b1d3cc2/mypy-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b16708a66d38abb1e6b5702f5c2c87e133289da36f6a1d15f6a5221085c6403", size = 10112355, upload-time = "2025-07-31T07:53:21.121Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/69/baa33927e29e6b4c55d798a9d44db5d394072eef2bdc18c3e2048c9ed1e9/mypy-1.17.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:89e972c0035e9e05823907ad5398c5a73b9f47a002b22359b177d40bdaee7056", size = 11875285, upload-time = "2025-07-31T07:53:55.293Z" },
+ { url = "https://files.pythonhosted.org/packages/90/13/f3a89c76b0a41e19490b01e7069713a30949d9a6c147289ee1521bcea245/mypy-1.17.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03b6d0ed2b188e35ee6d5c36b5580cffd6da23319991c49ab5556c023ccf1341", size = 12737895, upload-time = "2025-07-31T07:53:43.623Z" },
+ { url = "https://files.pythonhosted.org/packages/23/a1/c4ee79ac484241301564072e6476c5a5be2590bc2e7bfd28220033d2ef8f/mypy-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c837b896b37cd103570d776bda106eabb8737aa6dd4f248451aecf53030cdbeb", size = 12931025, upload-time = "2025-07-31T07:54:17.125Z" },
+ { url = "https://files.pythonhosted.org/packages/89/b8/7409477be7919a0608900e6320b155c72caab4fef46427c5cc75f85edadd/mypy-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:665afab0963a4b39dff7c1fa563cc8b11ecff7910206db4b2e64dd1ba25aed19", size = 9584664, upload-time = "2025-07-31T07:54:12.842Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/82/aec2fc9b9b149f372850291827537a508d6c4d3664b1750a324b91f71355/mypy-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:93378d3203a5c0800c6b6d850ad2f19f7a3cdf1a3701d3416dbf128805c6a6a7", size = 11075338, upload-time = "2025-07-31T07:53:38.873Z" },
+ { url = "https://files.pythonhosted.org/packages/07/ac/ee93fbde9d2242657128af8c86f5d917cd2887584cf948a8e3663d0cd737/mypy-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:15d54056f7fe7a826d897789f53dd6377ec2ea8ba6f776dc83c2902b899fee81", size = 10113066, upload-time = "2025-07-31T07:54:14.707Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/68/946a1e0be93f17f7caa56c45844ec691ca153ee8b62f21eddda336a2d203/mypy-1.17.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:209a58fed9987eccc20f2ca94afe7257a8f46eb5df1fb69958650973230f91e6", size = 11875473, upload-time = "2025-07-31T07:53:14.504Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/0f/478b4dce1cb4f43cf0f0d00fba3030b21ca04a01b74d1cd272a528cf446f/mypy-1.17.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:099b9a5da47de9e2cb5165e581f158e854d9e19d2e96b6698c0d64de911dd849", size = 12744296, upload-time = "2025-07-31T07:53:03.896Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/70/afa5850176379d1b303f992a828de95fc14487429a7139a4e0bdd17a8279/mypy-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ffadfbe6994d724c5a1bb6123a7d27dd68fc9c059561cd33b664a79578e14", size = 12914657, upload-time = "2025-07-31T07:54:08.576Z" },
+ { url = "https://files.pythonhosted.org/packages/53/f9/4a83e1c856a3d9c8f6edaa4749a4864ee98486e9b9dbfbc93842891029c2/mypy-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:9a2b7d9180aed171f033c9f2fc6c204c1245cf60b0cb61cf2e7acc24eea78e0a", size = 9593320, upload-time = "2025-07-31T07:53:01.341Z" },
+ { url = "https://files.pythonhosted.org/packages/38/56/79c2fac86da57c7d8c48622a05873eaab40b905096c33597462713f5af90/mypy-1.17.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:15a83369400454c41ed3a118e0cc58bd8123921a602f385cb6d6ea5df050c733", size = 11040037, upload-time = "2025-07-31T07:54:10.942Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/c3/adabe6ff53638e3cad19e3547268482408323b1e68bf082c9119000cd049/mypy-1.17.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55b918670f692fc9fba55c3298d8a3beae295c5cded0a55dccdc5bbead814acd", size = 10131550, upload-time = "2025-07-31T07:53:41.307Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/c5/2e234c22c3bdeb23a7817af57a58865a39753bde52c74e2c661ee0cfc640/mypy-1.17.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:62761474061feef6f720149d7ba876122007ddc64adff5ba6f374fda35a018a0", size = 11872963, upload-time = "2025-07-31T07:53:16.878Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/26/c13c130f35ca8caa5f2ceab68a247775648fdcd6c9a18f158825f2bc2410/mypy-1.17.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c49562d3d908fd49ed0938e5423daed8d407774a479b595b143a3d7f87cdae6a", size = 12710189, upload-time = "2025-07-31T07:54:01.962Z" },
+ { url = "https://files.pythonhosted.org/packages/82/df/c7d79d09f6de8383fe800521d066d877e54d30b4fb94281c262be2df84ef/mypy-1.17.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:397fba5d7616a5bc60b45c7ed204717eaddc38f826e3645402c426057ead9a91", size = 12900322, upload-time = "2025-07-31T07:53:10.551Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/98/3d5a48978b4f708c55ae832619addc66d677f6dc59f3ebad71bae8285ca6/mypy-1.17.1-cp314-cp314-win_amd64.whl", hash = "sha256:9d6b20b97d373f41617bd0708fd46aa656059af57f2ef72aa8c7d6a2b73b74ed", size = 9751879, upload-time = "2025-07-31T07:52:56.683Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/f3/8fcd2af0f5b806f6cf463efaffd3c9548a28f84220493ecd38d127b6b66d/mypy-1.17.1-py3-none-any.whl", hash = "sha256:a9f52c0351c21fe24c21d8c0eb1f62967b262d6729393397b6f443c3b773c3b9", size = 2283411, upload-time = "2025-07-31T07:53:24.664Z" },
+]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
+]
+
+[[package]]
+name = "networkx"
+version = "3.4.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fd/1d/06475e1cd5264c0b870ea2cc6fdb3e37177c1e565c43f56ff17a10e3937f/networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1", size = 2151368, upload-time = "2024-10-21T12:39:38.695Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f", size = 1723263, upload-time = "2024-10-21T12:39:36.247Z" },
+]
+
+[[package]]
+name = "networkx"
+version = "3.5"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065, upload-time = "2025-05-29T11:35:07.804Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406, upload-time = "2025-05-29T11:35:04.961Z" },
+]
+
+[[package]]
+name = "nodeenv"
+version = "1.9.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" },
+]
+
+[[package]]
+name = "numpy"
+version = "2.2.6"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/21/7d2a95e4bba9dc13d043ee156a356c0a8f0c6309dff6b21b4d71a073b8a8/numpy-2.2.6.tar.gz", hash = "sha256:e29554e2bef54a90aa5cc07da6ce955accb83f21ab5de01a62c8478897b264fd", size = 20276440, upload-time = "2025-05-17T22:38:04.611Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9a/3e/ed6db5be21ce87955c0cbd3009f2803f59fa08df21b5df06862e2d8e2bdd/numpy-2.2.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b412caa66f72040e6d268491a59f2c43bf03eb6c96dd8f0307829feb7fa2b6fb", size = 21165245, upload-time = "2025-05-17T21:27:58.555Z" },
+ { url = "https://files.pythonhosted.org/packages/22/c2/4b9221495b2a132cc9d2eb862e21d42a009f5a60e45fc44b00118c174bff/numpy-2.2.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e41fd67c52b86603a91c1a505ebaef50b3314de0213461c7a6e99c9a3beff90", size = 14360048, upload-time = "2025-05-17T21:28:21.406Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/77/dc2fcfc66943c6410e2bf598062f5959372735ffda175b39906d54f02349/numpy-2.2.6-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:37e990a01ae6ec7fe7fa1c26c55ecb672dd98b19c3d0e1d1f326fa13cb38d163", size = 5340542, upload-time = "2025-05-17T21:28:30.931Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/4f/1cb5fdc353a5f5cc7feb692db9b8ec2c3d6405453f982435efc52561df58/numpy-2.2.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:5a6429d4be8ca66d889b7cf70f536a397dc45ba6faeb5f8c5427935d9592e9cf", size = 6878301, upload-time = "2025-05-17T21:28:41.613Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/17/96a3acd228cec142fcb8723bd3cc39c2a474f7dcf0a5d16731980bcafa95/numpy-2.2.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efd28d4e9cd7d7a8d39074a4d44c63eda73401580c5c76acda2ce969e0a38e83", size = 14297320, upload-time = "2025-05-17T21:29:02.78Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/63/3de6a34ad7ad6646ac7d2f55ebc6ad439dbbf9c4370017c50cf403fb19b5/numpy-2.2.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc7b73d02efb0e18c000e9ad8b83480dfcd5dfd11065997ed4c6747470ae8915", size = 16801050, upload-time = "2025-05-17T21:29:27.675Z" },
+ { url = "https://files.pythonhosted.org/packages/07/b6/89d837eddef52b3d0cec5c6ba0456c1bf1b9ef6a6672fc2b7873c3ec4e2e/numpy-2.2.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:74d4531beb257d2c3f4b261bfb0fc09e0f9ebb8842d82a7b4209415896adc680", size = 15807034, upload-time = "2025-05-17T21:29:51.102Z" },
+ { url = "https://files.pythonhosted.org/packages/01/c8/dc6ae86e3c61cfec1f178e5c9f7858584049b6093f843bca541f94120920/numpy-2.2.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8fc377d995680230e83241d8a96def29f204b5782f371c532579b4f20607a289", size = 18614185, upload-time = "2025-05-17T21:30:18.703Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/c5/0064b1b7e7c89137b471ccec1fd2282fceaae0ab3a9550f2568782d80357/numpy-2.2.6-cp310-cp310-win32.whl", hash = "sha256:b093dd74e50a8cba3e873868d9e93a85b78e0daf2e98c6797566ad8044e8363d", size = 6527149, upload-time = "2025-05-17T21:30:29.788Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/dd/4b822569d6b96c39d1215dbae0582fd99954dcbcf0c1a13c61783feaca3f/numpy-2.2.6-cp310-cp310-win_amd64.whl", hash = "sha256:f0fd6321b839904e15c46e0d257fdd101dd7f530fe03fd6359c1ea63738703f3", size = 12904620, upload-time = "2025-05-17T21:30:48.994Z" },
+ { url = "https://files.pythonhosted.org/packages/da/a8/4f83e2aa666a9fbf56d6118faaaf5f1974d456b1823fda0a176eff722839/numpy-2.2.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f9f1adb22318e121c5c69a09142811a201ef17ab257a1e66ca3025065b7f53ae", size = 21176963, upload-time = "2025-05-17T21:31:19.36Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/2b/64e1affc7972decb74c9e29e5649fac940514910960ba25cd9af4488b66c/numpy-2.2.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c820a93b0255bc360f53eca31a0e676fd1101f673dda8da93454a12e23fc5f7a", size = 14406743, upload-time = "2025-05-17T21:31:41.087Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/9f/0121e375000b5e50ffdd8b25bf78d8e1a5aa4cca3f185d41265198c7b834/numpy-2.2.6-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3d70692235e759f260c3d837193090014aebdf026dfd167834bcba43e30c2a42", size = 5352616, upload-time = "2025-05-17T21:31:50.072Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0d/b48c405c91693635fbe2dcd7bc84a33a602add5f63286e024d3b6741411c/numpy-2.2.6-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:481b49095335f8eed42e39e8041327c05b0f6f4780488f61286ed3c01368d491", size = 6889579, upload-time = "2025-05-17T21:32:01.712Z" },
+ { url = "https://files.pythonhosted.org/packages/52/b8/7f0554d49b565d0171eab6e99001846882000883998e7b7d9f0d98b1f934/numpy-2.2.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b64d8d4d17135e00c8e346e0a738deb17e754230d7e0810ac5012750bbd85a5a", size = 14312005, upload-time = "2025-05-17T21:32:23.332Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/dd/2238b898e51bd6d389b7389ffb20d7f4c10066d80351187ec8e303a5a475/numpy-2.2.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba10f8411898fc418a521833e014a77d3ca01c15b0c6cdcce6a0d2897e6dbbdf", size = 16821570, upload-time = "2025-05-17T21:32:47.991Z" },
+ { url = "https://files.pythonhosted.org/packages/83/6c/44d0325722cf644f191042bf47eedad61c1e6df2432ed65cbe28509d404e/numpy-2.2.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bd48227a919f1bafbdda0583705e547892342c26fb127219d60a5c36882609d1", size = 15818548, upload-time = "2025-05-17T21:33:11.728Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/9d/81e8216030ce66be25279098789b665d49ff19eef08bfa8cb96d4957f422/numpy-2.2.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9551a499bf125c1d4f9e250377c1ee2eddd02e01eac6644c080162c0c51778ab", size = 18620521, upload-time = "2025-05-17T21:33:39.139Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/fd/e19617b9530b031db51b0926eed5345ce8ddc669bb3bc0044b23e275ebe8/numpy-2.2.6-cp311-cp311-win32.whl", hash = "sha256:0678000bb9ac1475cd454c6b8c799206af8107e310843532b04d49649c717a47", size = 6525866, upload-time = "2025-05-17T21:33:50.273Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0a/f354fb7176b81747d870f7991dc763e157a934c717b67b58456bc63da3df/numpy-2.2.6-cp311-cp311-win_amd64.whl", hash = "sha256:e8213002e427c69c45a52bbd94163084025f533a55a59d6f9c5b820774ef3303", size = 12907455, upload-time = "2025-05-17T21:34:09.135Z" },
+ { url = "https://files.pythonhosted.org/packages/82/5d/c00588b6cf18e1da539b45d3598d3557084990dcc4331960c15ee776ee41/numpy-2.2.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:41c5a21f4a04fa86436124d388f6ed60a9343a6f767fced1a8a71c3fbca038ff", size = 20875348, upload-time = "2025-05-17T21:34:39.648Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ee/560deadcdde6c2f90200450d5938f63a34b37e27ebff162810f716f6a230/numpy-2.2.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de749064336d37e340f640b05f24e9e3dd678c57318c7289d222a8a2f543e90c", size = 14119362, upload-time = "2025-05-17T21:35:01.241Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/65/4baa99f1c53b30adf0acd9a5519078871ddde8d2339dc5a7fde80d9d87da/numpy-2.2.6-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:894b3a42502226a1cac872f840030665f33326fc3dac8e57c607905773cdcde3", size = 5084103, upload-time = "2025-05-17T21:35:10.622Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/89/e5a34c071a0570cc40c9a54eb472d113eea6d002e9ae12bb3a8407fb912e/numpy-2.2.6-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:71594f7c51a18e728451bb50cc60a3ce4e6538822731b2933209a1f3614e9282", size = 6625382, upload-time = "2025-05-17T21:35:21.414Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/35/8c80729f1ff76b3921d5c9487c7ac3de9b2a103b1cd05e905b3090513510/numpy-2.2.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2618db89be1b4e05f7a1a847a9c1c0abd63e63a1607d892dd54668dd92faf87", size = 14018462, upload-time = "2025-05-17T21:35:42.174Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/3d/1e1db36cfd41f895d266b103df00ca5b3cbe965184df824dec5c08c6b803/numpy-2.2.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd83c01228a688733f1ded5201c678f0c53ecc1006ffbc404db9f7a899ac6249", size = 16527618, upload-time = "2025-05-17T21:36:06.711Z" },
+ { url = "https://files.pythonhosted.org/packages/61/c6/03ed30992602c85aa3cd95b9070a514f8b3c33e31124694438d88809ae36/numpy-2.2.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:37c0ca431f82cd5fa716eca9506aefcabc247fb27ba69c5062a6d3ade8cf8f49", size = 15505511, upload-time = "2025-05-17T21:36:29.965Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/25/5761d832a81df431e260719ec45de696414266613c9ee268394dd5ad8236/numpy-2.2.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe27749d33bb772c80dcd84ae7e8df2adc920ae8297400dabec45f0dedb3f6de", size = 18313783, upload-time = "2025-05-17T21:36:56.883Z" },
+ { url = "https://files.pythonhosted.org/packages/57/0a/72d5a3527c5ebffcd47bde9162c39fae1f90138c961e5296491ce778e682/numpy-2.2.6-cp312-cp312-win32.whl", hash = "sha256:4eeaae00d789f66c7a25ac5f34b71a7035bb474e679f410e5e1a94deb24cf2d4", size = 6246506, upload-time = "2025-05-17T21:37:07.368Z" },
+ { url = "https://files.pythonhosted.org/packages/36/fa/8c9210162ca1b88529ab76b41ba02d433fd54fecaf6feb70ef9f124683f1/numpy-2.2.6-cp312-cp312-win_amd64.whl", hash = "sha256:c1f9540be57940698ed329904db803cf7a402f3fc200bfe599334c9bd84a40b2", size = 12614190, upload-time = "2025-05-17T21:37:26.213Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/5c/6657823f4f594f72b5471f1db1ab12e26e890bb2e41897522d134d2a3e81/numpy-2.2.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0811bb762109d9708cca4d0b13c4f67146e3c3b7cf8d34018c722adb2d957c84", size = 20867828, upload-time = "2025-05-17T21:37:56.699Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/9e/14520dc3dadf3c803473bd07e9b2bd1b69bc583cb2497b47000fed2fa92f/numpy-2.2.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:287cc3162b6f01463ccd86be154f284d0893d2b3ed7292439ea97eafa8170e0b", size = 14143006, upload-time = "2025-05-17T21:38:18.291Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/06/7e96c57d90bebdce9918412087fc22ca9851cceaf5567a45c1f404480e9e/numpy-2.2.6-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:f1372f041402e37e5e633e586f62aa53de2eac8d98cbfb822806ce4bbefcb74d", size = 5076765, upload-time = "2025-05-17T21:38:27.319Z" },
+ { url = "https://files.pythonhosted.org/packages/73/ed/63d920c23b4289fdac96ddbdd6132e9427790977d5457cd132f18e76eae0/numpy-2.2.6-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:55a4d33fa519660d69614a9fad433be87e5252f4b03850642f88993f7b2ca566", size = 6617736, upload-time = "2025-05-17T21:38:38.141Z" },
+ { url = "https://files.pythonhosted.org/packages/85/c5/e19c8f99d83fd377ec8c7e0cf627a8049746da54afc24ef0a0cb73d5dfb5/numpy-2.2.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f92729c95468a2f4f15e9bb94c432a9229d0d50de67304399627a943201baa2f", size = 14010719, upload-time = "2025-05-17T21:38:58.433Z" },
+ { url = "https://files.pythonhosted.org/packages/19/49/4df9123aafa7b539317bf6d342cb6d227e49f7a35b99c287a6109b13dd93/numpy-2.2.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bc23a79bfabc5d056d106f9befb8d50c31ced2fbc70eedb8155aec74a45798f", size = 16526072, upload-time = "2025-05-17T21:39:22.638Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/6c/04b5f47f4f32f7c2b0e7260442a8cbcf8168b0e1a41ff1495da42f42a14f/numpy-2.2.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e3143e4451880bed956e706a3220b4e5cf6172ef05fcc397f6f36a550b1dd868", size = 15503213, upload-time = "2025-05-17T21:39:45.865Z" },
+ { url = "https://files.pythonhosted.org/packages/17/0a/5cd92e352c1307640d5b6fec1b2ffb06cd0dabe7d7b8227f97933d378422/numpy-2.2.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b4f13750ce79751586ae2eb824ba7e1e8dba64784086c98cdbbcc6a42112ce0d", size = 18316632, upload-time = "2025-05-17T21:40:13.331Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/3b/5cba2b1d88760ef86596ad0f3d484b1cbff7c115ae2429678465057c5155/numpy-2.2.6-cp313-cp313-win32.whl", hash = "sha256:5beb72339d9d4fa36522fc63802f469b13cdbe4fdab4a288f0c441b74272ebfd", size = 6244532, upload-time = "2025-05-17T21:43:46.099Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/3b/d58c12eafcb298d4e6d0d40216866ab15f59e55d148a5658bb3132311fcf/numpy-2.2.6-cp313-cp313-win_amd64.whl", hash = "sha256:b0544343a702fa80c95ad5d3d608ea3599dd54d4632df855e4c8d24eb6ecfa1c", size = 12610885, upload-time = "2025-05-17T21:44:05.145Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/9e/4bf918b818e516322db999ac25d00c75788ddfd2d2ade4fa66f1f38097e1/numpy-2.2.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0bca768cd85ae743b2affdc762d617eddf3bcf8724435498a1e80132d04879e6", size = 20963467, upload-time = "2025-05-17T21:40:44Z" },
+ { url = "https://files.pythonhosted.org/packages/61/66/d2de6b291507517ff2e438e13ff7b1e2cdbdb7cb40b3ed475377aece69f9/numpy-2.2.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:fc0c5673685c508a142ca65209b4e79ed6740a4ed6b2267dbba90f34b0b3cfda", size = 14225144, upload-time = "2025-05-17T21:41:05.695Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/25/480387655407ead912e28ba3a820bc69af9adf13bcbe40b299d454ec011f/numpy-2.2.6-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:5bd4fc3ac8926b3819797a7c0e2631eb889b4118a9898c84f585a54d475b7e40", size = 5200217, upload-time = "2025-05-17T21:41:15.903Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/4a/6e313b5108f53dcbf3aca0c0f3e9c92f4c10ce57a0a721851f9785872895/numpy-2.2.6-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:fee4236c876c4e8369388054d02d0e9bb84821feb1a64dd59e137e6511a551f8", size = 6712014, upload-time = "2025-05-17T21:41:27.321Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/30/172c2d5c4be71fdf476e9de553443cf8e25feddbe185e0bd88b096915bcc/numpy-2.2.6-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e1dda9c7e08dc141e0247a5b8f49cf05984955246a327d4c48bda16821947b2f", size = 14077935, upload-time = "2025-05-17T21:41:49.738Z" },
+ { url = "https://files.pythonhosted.org/packages/12/fb/9e743f8d4e4d3c710902cf87af3512082ae3d43b945d5d16563f26ec251d/numpy-2.2.6-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f447e6acb680fd307f40d3da4852208af94afdfab89cf850986c3ca00562f4fa", size = 16600122, upload-time = "2025-05-17T21:42:14.046Z" },
+ { url = "https://files.pythonhosted.org/packages/12/75/ee20da0e58d3a66f204f38916757e01e33a9737d0b22373b3eb5a27358f9/numpy-2.2.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:389d771b1623ec92636b0786bc4ae56abafad4a4c513d36a55dce14bd9ce8571", size = 15586143, upload-time = "2025-05-17T21:42:37.464Z" },
+ { url = "https://files.pythonhosted.org/packages/76/95/bef5b37f29fc5e739947e9ce5179ad402875633308504a52d188302319c8/numpy-2.2.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8e9ace4a37db23421249ed236fdcdd457d671e25146786dfc96835cd951aa7c1", size = 18385260, upload-time = "2025-05-17T21:43:05.189Z" },
+ { url = "https://files.pythonhosted.org/packages/09/04/f2f83279d287407cf36a7a8053a5abe7be3622a4363337338f2585e4afda/numpy-2.2.6-cp313-cp313t-win32.whl", hash = "sha256:038613e9fb8c72b0a41f025a7e4c3f0b7a1b5d768ece4796b674c8f3fe13efff", size = 6377225, upload-time = "2025-05-17T21:43:16.254Z" },
+ { url = "https://files.pythonhosted.org/packages/67/0e/35082d13c09c02c011cf21570543d202ad929d961c02a147493cb0c2bdf5/numpy-2.2.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6031dd6dfecc0cf9f668681a37648373bddd6421fff6c66ec1624eed0180ee06", size = 12771374, upload-time = "2025-05-17T21:43:35.479Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/3b/d94a75f4dbf1ef5d321523ecac21ef23a3cd2ac8b78ae2aac40873590229/numpy-2.2.6-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0b605b275d7bd0c640cad4e5d30fa701a8d59302e127e5f79138ad62762c3e3d", size = 21040391, upload-time = "2025-05-17T21:44:35.948Z" },
+ { url = "https://files.pythonhosted.org/packages/17/f4/09b2fa1b58f0fb4f7c7963a1649c64c4d315752240377ed74d9cd878f7b5/numpy-2.2.6-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:7befc596a7dc9da8a337f79802ee8adb30a552a94f792b9c9d18c840055907db", size = 6786754, upload-time = "2025-05-17T21:44:47.446Z" },
+ { url = "https://files.pythonhosted.org/packages/af/30/feba75f143bdc868a1cc3f44ccfa6c4b9ec522b36458e738cd00f67b573f/numpy-2.2.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce47521a4754c8f4593837384bd3424880629f718d87c5d44f8ed763edd63543", size = 16643476, upload-time = "2025-05-17T21:45:11.871Z" },
+ { url = "https://files.pythonhosted.org/packages/37/48/ac2a9584402fb6c0cd5b5d1a91dcf176b15760130dd386bbafdbfe3640bf/numpy-2.2.6-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d042d24c90c41b54fd506da306759e06e568864df8ec17ccc17e9e884634fd00", size = 12812666, upload-time = "2025-05-17T21:45:31.426Z" },
+]
+
+[[package]]
+name = "numpy"
+version = "2.3.2"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+sdist = { url = "https://files.pythonhosted.org/packages/37/7d/3fec4199c5ffb892bed55cff901e4f39a58c81df9c44c280499e92cad264/numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48", size = 20489306, upload-time = "2025-07-24T21:32:07.553Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/96/26/1320083986108998bd487e2931eed2aeedf914b6e8905431487543ec911d/numpy-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:852ae5bed3478b92f093e30f785c98e0cb62fa0a939ed057c31716e18a7a22b9", size = 21259016, upload-time = "2025-07-24T20:24:35.214Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/2b/792b341463fa93fc7e55abbdbe87dac316c5b8cb5e94fb7a59fb6fa0cda5/numpy-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a0e27186e781a69959d0230dd9909b5e26024f8da10683bd6344baea1885168", size = 14451158, upload-time = "2025-07-24T20:24:58.397Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/13/e792d7209261afb0c9f4759ffef6135b35c77c6349a151f488f531d13595/numpy-2.3.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:f0a1a8476ad77a228e41619af2fa9505cf69df928e9aaa165746584ea17fed2b", size = 5379817, upload-time = "2025-07-24T20:25:07.746Z" },
+ { url = "https://files.pythonhosted.org/packages/49/ce/055274fcba4107c022b2113a213c7287346563f48d62e8d2a5176ad93217/numpy-2.3.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:cbc95b3813920145032412f7e33d12080f11dc776262df1712e1638207dde9e8", size = 6913606, upload-time = "2025-07-24T20:25:18.84Z" },
+ { url = "https://files.pythonhosted.org/packages/17/f2/e4d72e6bc5ff01e2ab613dc198d560714971900c03674b41947e38606502/numpy-2.3.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f75018be4980a7324edc5930fe39aa391d5734531b1926968605416ff58c332d", size = 14589652, upload-time = "2025-07-24T20:25:40.356Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/b0/fbeee3000a51ebf7222016e2939b5c5ecf8000a19555d04a18f1e02521b8/numpy-2.3.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20b8200721840f5621b7bd03f8dcd78de33ec522fc40dc2641aa09537df010c3", size = 16938816, upload-time = "2025-07-24T20:26:05.721Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/ec/2f6c45c3484cc159621ea8fc000ac5a86f1575f090cac78ac27193ce82cd/numpy-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1f91e5c028504660d606340a084db4b216567ded1056ea2b4be4f9d10b67197f", size = 16370512, upload-time = "2025-07-24T20:26:30.545Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/01/dd67cf511850bd7aefd6347aaae0956ed415abea741ae107834aae7d6d4e/numpy-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fb1752a3bb9a3ad2d6b090b88a9a0ae1cd6f004ef95f75825e2f382c183b2097", size = 18884947, upload-time = "2025-07-24T20:26:58.24Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/17/2cf60fd3e6a61d006778735edf67a222787a8c1a7842aed43ef96d777446/numpy-2.3.2-cp311-cp311-win32.whl", hash = "sha256:4ae6863868aaee2f57503c7a5052b3a2807cf7a3914475e637a0ecd366ced220", size = 6599494, upload-time = "2025-07-24T20:27:09.786Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/03/0eade211c504bda872a594f045f98ddcc6caef2b7c63610946845e304d3f/numpy-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:240259d6564f1c65424bcd10f435145a7644a65a6811cfc3201c4a429ba79170", size = 13087889, upload-time = "2025-07-24T20:27:29.558Z" },
+ { url = "https://files.pythonhosted.org/packages/13/32/2c7979d39dafb2a25087e12310fc7f3b9d3c7d960df4f4bc97955ae0ce1d/numpy-2.3.2-cp311-cp311-win_arm64.whl", hash = "sha256:4209f874d45f921bde2cff1ffcd8a3695f545ad2ffbef6d3d3c6768162efab89", size = 10459560, upload-time = "2025-07-24T20:27:46.803Z" },
+ { url = "https://files.pythonhosted.org/packages/00/6d/745dd1c1c5c284d17725e5c802ca4d45cfc6803519d777f087b71c9f4069/numpy-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b", size = 20956420, upload-time = "2025-07-24T20:28:18.002Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/96/e7b533ea5740641dd62b07a790af5d9d8fec36000b8e2d0472bd7574105f/numpy-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f", size = 14184660, upload-time = "2025-07-24T20:28:39.522Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/53/102c6122db45a62aa20d1b18c9986f67e6b97e0d6fbc1ae13e3e4c84430c/numpy-2.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0", size = 5113382, upload-time = "2025-07-24T20:28:48.544Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/21/376257efcbf63e624250717e82b4fae93d60178f09eb03ed766dbb48ec9c/numpy-2.3.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b", size = 6647258, upload-time = "2025-07-24T20:28:59.104Z" },
+ { url = "https://files.pythonhosted.org/packages/91/ba/f4ebf257f08affa464fe6036e13f2bf9d4642a40228781dc1235da81be9f/numpy-2.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370", size = 14281409, upload-time = "2025-07-24T20:40:30.298Z" },
+ { url = "https://files.pythonhosted.org/packages/59/ef/f96536f1df42c668cbacb727a8c6da7afc9c05ece6d558927fb1722693e1/numpy-2.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73", size = 16641317, upload-time = "2025-07-24T20:40:56.625Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/a7/af813a7b4f9a42f498dde8a4c6fcbff8100eed00182cc91dbaf095645f38/numpy-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc", size = 16056262, upload-time = "2025-07-24T20:41:20.797Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/5d/41c4ef8404caaa7f05ed1cfb06afe16a25895260eacbd29b4d84dff2920b/numpy-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be", size = 18579342, upload-time = "2025-07-24T20:41:50.753Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/4f/9950e44c5a11636f4a3af6e825ec23003475cc9a466edb7a759ed3ea63bd/numpy-2.3.2-cp312-cp312-win32.whl", hash = "sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036", size = 6320610, upload-time = "2025-07-24T20:42:01.551Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/2f/244643a5ce54a94f0a9a2ab578189c061e4a87c002e037b0829dd77293b6/numpy-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f", size = 12786292, upload-time = "2025-07-24T20:42:20.738Z" },
+ { url = "https://files.pythonhosted.org/packages/54/cd/7b5f49d5d78db7badab22d8323c1b6ae458fbf86c4fdfa194ab3cd4eb39b/numpy-2.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07", size = 10194071, upload-time = "2025-07-24T20:42:36.657Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/c0/c6bb172c916b00700ed3bf71cb56175fd1f7dbecebf8353545d0b5519f6c/numpy-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3", size = 20949074, upload-time = "2025-07-24T20:43:07.813Z" },
+ { url = "https://files.pythonhosted.org/packages/20/4e/c116466d22acaf4573e58421c956c6076dc526e24a6be0903219775d862e/numpy-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b", size = 14177311, upload-time = "2025-07-24T20:43:29.335Z" },
+ { url = "https://files.pythonhosted.org/packages/78/45/d4698c182895af189c463fc91d70805d455a227261d950e4e0f1310c2550/numpy-2.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6", size = 5106022, upload-time = "2025-07-24T20:43:37.999Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/76/3e6880fef4420179309dba72a8c11f6166c431cf6dee54c577af8906f914/numpy-2.3.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089", size = 6640135, upload-time = "2025-07-24T20:43:49.28Z" },
+ { url = "https://files.pythonhosted.org/packages/34/fa/87ff7f25b3c4ce9085a62554460b7db686fef1e0207e8977795c7b7d7ba1/numpy-2.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2", size = 14278147, upload-time = "2025-07-24T20:44:10.328Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/0f/571b2c7a3833ae419fe69ff7b479a78d313581785203cc70a8db90121b9a/numpy-2.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f", size = 16635989, upload-time = "2025-07-24T20:44:34.88Z" },
+ { url = "https://files.pythonhosted.org/packages/24/5a/84ae8dca9c9a4c592fe11340b36a86ffa9fd3e40513198daf8a97839345c/numpy-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee", size = 16053052, upload-time = "2025-07-24T20:44:58.872Z" },
+ { url = "https://files.pythonhosted.org/packages/57/7c/e5725d99a9133b9813fcf148d3f858df98511686e853169dbaf63aec6097/numpy-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6", size = 18577955, upload-time = "2025-07-24T20:45:26.714Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/11/7c546fcf42145f29b71e4d6f429e96d8d68e5a7ba1830b2e68d7418f0bbd/numpy-2.3.2-cp313-cp313-win32.whl", hash = "sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b", size = 6311843, upload-time = "2025-07-24T20:49:24.444Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/6f/a428fd1cb7ed39b4280d057720fed5121b0d7754fd2a9768640160f5517b/numpy-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56", size = 12782876, upload-time = "2025-07-24T20:49:43.227Z" },
+ { url = "https://files.pythonhosted.org/packages/65/85/4ea455c9040a12595fb6c43f2c217257c7b52dd0ba332c6a6c1d28b289fe/numpy-2.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2", size = 10192786, upload-time = "2025-07-24T20:49:59.443Z" },
+ { url = "https://files.pythonhosted.org/packages/80/23/8278f40282d10c3f258ec3ff1b103d4994bcad78b0cba9208317f6bb73da/numpy-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab", size = 21047395, upload-time = "2025-07-24T20:45:58.821Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/2d/624f2ce4a5df52628b4ccd16a4f9437b37c35f4f8a50d00e962aae6efd7a/numpy-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2", size = 14300374, upload-time = "2025-07-24T20:46:20.207Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/62/ff1e512cdbb829b80a6bd08318a58698867bca0ca2499d101b4af063ee97/numpy-2.3.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a", size = 5228864, upload-time = "2025-07-24T20:46:30.58Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/8e/74bc18078fff03192d4032cfa99d5a5ca937807136d6f5790ce07ca53515/numpy-2.3.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286", size = 6737533, upload-time = "2025-07-24T20:46:46.111Z" },
+ { url = "https://files.pythonhosted.org/packages/19/ea/0731efe2c9073ccca5698ef6a8c3667c4cf4eea53fcdcd0b50140aba03bc/numpy-2.3.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8", size = 14352007, upload-time = "2025-07-24T20:47:07.1Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/90/36be0865f16dfed20f4bc7f75235b963d5939707d4b591f086777412ff7b/numpy-2.3.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a", size = 16701914, upload-time = "2025-07-24T20:47:32.459Z" },
+ { url = "https://files.pythonhosted.org/packages/94/30/06cd055e24cb6c38e5989a9e747042b4e723535758e6153f11afea88c01b/numpy-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91", size = 16132708, upload-time = "2025-07-24T20:47:58.129Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/14/ecede608ea73e58267fd7cb78f42341b3b37ba576e778a1a06baffbe585c/numpy-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5", size = 18651678, upload-time = "2025-07-24T20:48:25.402Z" },
+ { url = "https://files.pythonhosted.org/packages/40/f3/2fe6066b8d07c3685509bc24d56386534c008b462a488b7f503ba82b8923/numpy-2.3.2-cp313-cp313t-win32.whl", hash = "sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5", size = 6441832, upload-time = "2025-07-24T20:48:37.181Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/ba/0937d66d05204d8f28630c9c60bc3eda68824abde4cf756c4d6aad03b0c6/numpy-2.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450", size = 12927049, upload-time = "2025-07-24T20:48:56.24Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/ed/13542dd59c104d5e654dfa2ac282c199ba64846a74c2c4bcdbc3a0f75df1/numpy-2.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a", size = 10262935, upload-time = "2025-07-24T20:49:13.136Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/7c/7659048aaf498f7611b783e000c7268fcc4dcf0ce21cd10aad7b2e8f9591/numpy-2.3.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a", size = 20950906, upload-time = "2025-07-24T20:50:30.346Z" },
+ { url = "https://files.pythonhosted.org/packages/80/db/984bea9d4ddf7112a04cfdfb22b1050af5757864cfffe8e09e44b7f11a10/numpy-2.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b", size = 14185607, upload-time = "2025-07-24T20:50:51.923Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/76/b3d6f414f4eca568f469ac112a3b510938d892bc5a6c190cb883af080b77/numpy-2.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125", size = 5114110, upload-time = "2025-07-24T20:51:01.041Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/d2/6f5e6826abd6bca52392ed88fe44a4b52aacb60567ac3bc86c67834c3a56/numpy-2.3.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19", size = 6642050, upload-time = "2025-07-24T20:51:11.64Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/43/f12b2ade99199e39c73ad182f103f9d9791f48d885c600c8e05927865baf/numpy-2.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f", size = 14296292, upload-time = "2025-07-24T20:51:33.488Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/f9/77c07d94bf110a916b17210fac38680ed8734c236bfed9982fd8524a7b47/numpy-2.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5", size = 16638913, upload-time = "2025-07-24T20:51:58.517Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/d1/9d9f2c8ea399cc05cfff8a7437453bd4e7d894373a93cdc46361bbb49a7d/numpy-2.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58", size = 16071180, upload-time = "2025-07-24T20:52:22.827Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/41/82e2c68aff2a0c9bf315e47d61951099fed65d8cb2c8d9dc388cb87e947e/numpy-2.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0", size = 18576809, upload-time = "2025-07-24T20:52:51.015Z" },
+ { url = "https://files.pythonhosted.org/packages/14/14/4b4fd3efb0837ed252d0f583c5c35a75121038a8c4e065f2c259be06d2d8/numpy-2.3.2-cp314-cp314-win32.whl", hash = "sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2", size = 6366410, upload-time = "2025-07-24T20:56:44.949Z" },
+ { url = "https://files.pythonhosted.org/packages/11/9e/b4c24a6b8467b61aced5c8dc7dcfce23621baa2e17f661edb2444a418040/numpy-2.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b", size = 12918821, upload-time = "2025-07-24T20:57:06.479Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/0f/0dc44007c70b1007c1cef86b06986a3812dd7106d8f946c09cfa75782556/numpy-2.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910", size = 10477303, upload-time = "2025-07-24T20:57:22.879Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/3e/075752b79140b78ddfc9c0a1634d234cfdbc6f9bbbfa6b7504e445ad7d19/numpy-2.3.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e", size = 21047524, upload-time = "2025-07-24T20:53:22.086Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/6d/60e8247564a72426570d0e0ea1151b95ce5bd2f1597bb878a18d32aec855/numpy-2.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45", size = 14300519, upload-time = "2025-07-24T20:53:44.053Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/73/d8326c442cd428d47a067070c3ac6cc3b651a6e53613a1668342a12d4479/numpy-2.3.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b", size = 5228972, upload-time = "2025-07-24T20:53:53.81Z" },
+ { url = "https://files.pythonhosted.org/packages/34/2e/e71b2d6dad075271e7079db776196829019b90ce3ece5c69639e4f6fdc44/numpy-2.3.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2", size = 6737439, upload-time = "2025-07-24T20:54:04.742Z" },
+ { url = "https://files.pythonhosted.org/packages/15/b0/d004bcd56c2c5e0500ffc65385eb6d569ffd3363cb5e593ae742749b2daa/numpy-2.3.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0", size = 14352479, upload-time = "2025-07-24T20:54:25.819Z" },
+ { url = "https://files.pythonhosted.org/packages/11/e3/285142fcff8721e0c99b51686426165059874c150ea9ab898e12a492e291/numpy-2.3.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0", size = 16702805, upload-time = "2025-07-24T20:54:50.814Z" },
+ { url = "https://files.pythonhosted.org/packages/33/c3/33b56b0e47e604af2c7cd065edca892d180f5899599b76830652875249a3/numpy-2.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2", size = 16133830, upload-time = "2025-07-24T20:55:17.306Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/ae/7b1476a1f4d6a48bc669b8deb09939c56dd2a439db1ab03017844374fb67/numpy-2.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf", size = 18652665, upload-time = "2025-07-24T20:55:46.665Z" },
+ { url = "https://files.pythonhosted.org/packages/14/ba/5b5c9978c4bb161034148ade2de9db44ec316fab89ce8c400db0e0c81f86/numpy-2.3.2-cp314-cp314t-win32.whl", hash = "sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1", size = 6514777, upload-time = "2025-07-24T20:55:57.66Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/46/3dbaf0ae7c17cdc46b9f662c56da2054887b8d9e737c1476f335c83d33db/numpy-2.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b", size = 13111856, upload-time = "2025-07-24T20:56:17.318Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/9e/1652778bce745a67b5fe05adde60ed362d38eb17d919a540e813d30f6874/numpy-2.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631", size = 10544226, upload-time = "2025-07-24T20:56:34.509Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/ea/50ebc91d28b275b23b7128ef25c3d08152bc4068f42742867e07a870a42a/numpy-2.3.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:14a91ebac98813a49bc6aa1a0dfc09513dcec1d97eaf31ca21a87221a1cdcb15", size = 21130338, upload-time = "2025-07-24T20:57:54.37Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/57/cdd5eac00dd5f137277355c318a955c0d8fb8aa486020c22afd305f8b88f/numpy-2.3.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:71669b5daae692189540cffc4c439468d35a3f84f0c88b078ecd94337f6cb0ec", size = 14375776, upload-time = "2025-07-24T20:58:16.303Z" },
+ { url = "https://files.pythonhosted.org/packages/83/85/27280c7f34fcd305c2209c0cdca4d70775e4859a9eaa92f850087f8dea50/numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:69779198d9caee6e547adb933941ed7520f896fd9656834c300bdf4dd8642712", size = 5304882, upload-time = "2025-07-24T20:58:26.199Z" },
+ { url = "https://files.pythonhosted.org/packages/48/b4/6500b24d278e15dd796f43824e69939d00981d37d9779e32499e823aa0aa/numpy-2.3.2-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2c3271cc4097beb5a60f010bcc1cc204b300bb3eafb4399376418a83a1c6373c", size = 6818405, upload-time = "2025-07-24T20:58:37.341Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/c9/142c1e03f199d202da8e980c2496213509291b6024fd2735ad28ae7065c7/numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8446acd11fe3dc1830568c941d44449fd5cb83068e5c70bd5a470d323d448296", size = 14419651, upload-time = "2025-07-24T20:58:59.048Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/95/8023e87cbea31a750a6c00ff9427d65ebc5fef104a136bfa69f76266d614/numpy-2.3.2-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa098a5ab53fa407fded5870865c6275a5cd4101cfdef8d6fafc48286a96e981", size = 16760166, upload-time = "2025-07-24T21:28:56.38Z" },
+ { url = "https://files.pythonhosted.org/packages/78/e3/6690b3f85a05506733c7e90b577e4762517404ea78bab2ca3a5cb1aeb78d/numpy-2.3.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6936aff90dda378c09bea075af0d9c675fe3a977a9d2402f95a87f440f59f619", size = 12977811, upload-time = "2025-07-24T21:29:18.234Z" },
+]
+
+[[package]]
+name = "nvidia-cublas-cu12"
+version = "12.8.4.1"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/dc/61/e24b560ab2e2eaeb3c839129175fb330dfcfc29e5203196e5541a4c44682/nvidia_cublas_cu12-12.8.4.1-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:8ac4e771d5a348c551b2a426eda6193c19aa630236b418086020df5ba9667142", size = 594346921, upload-time = "2025-03-07T01:44:31.254Z" },
+]
+
+[[package]]
+name = "nvidia-cuda-cupti-cu12"
+version = "12.8.90"
+source = { registry = "https://pypi.org/simple" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f8/02/2adcaa145158bf1a8295d83591d22e4103dbfd821bcaf6f3f53151ca4ffa/nvidia_cuda_cupti_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ea0cb07ebda26bb9b29ba82cda34849e73c166c18162d3913575b0c9db9a6182", size = 10248621, upload-time = "2025-03-07T01:40:21.213Z" },
+]
+
+[[package]]
+name = "nvidia-cuda-nvrtc-cu12"
+version = "12.8.93"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cuda-nvrtc-cu12/nvidia_cuda_nvrtc_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:a7756528852ef889772a84c6cd89d41dfa74667e24cca16bb31f8f061e3e9994" },
+]
+
+[[package]]
+name = "nvidia-cuda-runtime-cu12"
+version = "12.8.90"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cuda-runtime-cu12/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:52bf7bbee900262ffefe5e9d5a2a69a30d97e2bc5bb6cc866688caa976966e3d" },
+ { url = "https://pypi.nvidia.com/nvidia-cuda-runtime-cu12/nvidia_cuda_runtime_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adade8dcbd0edf427b7204d480d6066d33902cab2a4707dcfc48a2d0fd44ab90" },
+ { url = "https://pypi.nvidia.com/nvidia-cuda-runtime-cu12/nvidia_cuda_runtime_cu12-12.8.90-py3-none-win_amd64.whl", hash = "sha256:c0c6027f01505bfed6c3b21ec546f69c687689aad5f1a377554bc6ca4aa993a8" },
+]
+
+[[package]]
+name = "nvidia-cuda-runtime-cu12"
+version = "12.9.79"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cuda-runtime-cu12/nvidia_cuda_runtime_cu12-12.9.79-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83469a846206f2a733db0c42e223589ab62fd2fabac4432d2f8802de4bded0a4" },
+]
+
+[[package]]
+name = "nvidia-cudnn-cu12"
+version = "9.10.2.21"
+source = { registry = "https://pypi.nvidia.com/" }
+dependencies = [
+ { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cudnn-cu12/nvidia_cudnn_cu12-9.10.2.21-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:949452be657fa16687d0930933f032835951ef0892b37d2d53824d1a84dc97a8" },
+]
+
+[[package]]
+name = "nvidia-cufft-cu12"
+version = "11.3.3.83"
+source = { registry = "https://pypi.nvidia.com/" }
+dependencies = [
+ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cufft-cu12/nvidia_cufft_cu12-11.3.3.83-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d2dd21ec0b88cf61b62e6b43564355e5222e4a3fb394cac0db101f2dd0d4f74" },
+]
+
+[[package]]
+name = "nvidia-cufile-cu12"
+version = "1.13.1.3"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cufile-cu12/nvidia_cufile_cu12-1.13.1.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1d069003be650e131b21c932ec3d8969c1715379251f8d23a1860554b1cb24fc" },
+]
+
+[[package]]
+name = "nvidia-curand-cu12"
+version = "10.3.9.90"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-curand-cu12/nvidia_curand_cu12-10.3.9.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:b32331d4f4df5d6eefa0554c565b626c7216f87a06a4f56fab27c3b68a830ec9" },
+]
+
+[[package]]
+name = "nvidia-cusolver-cu12"
+version = "11.7.3.90"
+source = { registry = "https://pypi.nvidia.com/" }
+dependencies = [
+ { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+ { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cusolver-cu12/nvidia_cusolver_cu12-11.7.3.90-py3-none-manylinux_2_27_x86_64.whl", hash = "sha256:4376c11ad263152bd50ea295c05370360776f8c3427b30991df774f9fb26c450" },
+]
+
+[[package]]
+name = "nvidia-cusparse-cu12"
+version = "12.5.8.93"
+source = { registry = "https://pypi.nvidia.com/" }
+dependencies = [
+ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cusparse-cu12/nvidia_cusparse_cu12-12.5.8.93-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1ec05d76bbbd8b61b06a80e1eaf8cf4959c3d4ce8e711b65ebd0443bb0ebb13b" },
+]
+
+[[package]]
+name = "nvidia-cusparselt-cu12"
+version = "0.7.1"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-cusparselt-cu12/nvidia_cusparselt_cu12-0.7.1-py3-none-manylinux2014_x86_64.whl", hash = "sha256:f1bb701d6b930d5a7cea44c19ceb973311500847f81b634d802b7b539dc55623" },
+]
+
+[[package]]
+name = "nvidia-nccl-cu12"
+version = "2.27.3"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-nccl-cu12/nvidia_nccl_cu12-2.27.3-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:adf27ccf4238253e0b826bce3ff5fa532d65fc42322c8bfdfaf28024c0fbe039" },
+]
+
+[[package]]
+name = "nvidia-nvjitlink-cu12"
+version = "12.8.93"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-nvjitlink-cu12/nvidia_nvjitlink_cu12-12.8.93-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:81ff63371a7ebd6e6451970684f916be2eab07321b73c9d244dc2b4da7f73b88" },
+]
+
+[[package]]
+name = "nvidia-nvtx-cu12"
+version = "12.8.90"
+source = { registry = "https://pypi.nvidia.com/" }
+wheels = [
+ { url = "https://pypi.nvidia.com/nvidia-nvtx-cu12/nvidia_nvtx_cu12-12.8.90-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b17e2001cc0d751a5bc2c6ec6d26ad95913324a4adb86788c944f8ce9ba441f" },
+]
+
+[[package]]
+name = "onnx"
+version = "1.18.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "protobuf" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3d/60/e56e8ec44ed34006e6d4a73c92a04d9eea6163cc12440e35045aec069175/onnx-1.18.0.tar.gz", hash = "sha256:3d8dbf9e996629131ba3aa1afd1d8239b660d1f830c6688dd7e03157cccd6b9c", size = 12563009, upload-time = "2025-05-12T22:03:09.626Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8e/e3/ab8a09c0af43373e0422de461956a1737581325260659aeffae22a7dad18/onnx-1.18.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:4a3b50d94620e2c7c1404d1d59bc53e665883ae3fecbd856cc86da0639fd0fc3", size = 18280145, upload-time = "2025-05-12T22:01:49.875Z" },
+ { url = "https://files.pythonhosted.org/packages/04/5b/3cfd183961a0a872fe29c95f8d07264890ec65c75c94b99a4dabc950df29/onnx-1.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e189652dad6e70a0465035c55cc565c27aa38803dd4f4e74e4b952ee1c2de94b", size = 17422721, upload-time = "2025-05-12T22:01:52.841Z" },
+ { url = "https://files.pythonhosted.org/packages/58/52/fa649429016c5790f68c614cdebfbefd3e72ba1c458966305297d540f713/onnx-1.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfb1f271b1523b29f324bfd223f6a4cfbdc5a2f2f16e73563671932d33663365", size = 17584220, upload-time = "2025-05-12T22:01:56.458Z" },
+ { url = "https://files.pythonhosted.org/packages/42/52/dc166de41a5f72738b0bdfb2a19e0ebe4743cf3ecc9ae381ea3425bcb332/onnx-1.18.0-cp310-cp310-win32.whl", hash = "sha256:e03071041efd82e0317b3c45433b2f28146385b80f26f82039bc68048ac1a7a0", size = 15734494, upload-time = "2025-05-12T22:01:59.704Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/f9/e766a3b85b7651ddfc5f9648e0e9dc24e88b7e88ea7f8c23187530e818ea/onnx-1.18.0-cp310-cp310-win_amd64.whl", hash = "sha256:9235b3493951e11e75465d56f4cd97e3e9247f096160dd3466bfabe4cbc938bc", size = 15848421, upload-time = "2025-05-12T22:02:03.01Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/3a/a336dac4db1eddba2bf577191e5b7d3e4c26fcee5ec518a5a5b11d13540d/onnx-1.18.0-cp311-cp311-macosx_12_0_universal2.whl", hash = "sha256:735e06d8d0cf250dc498f54038831401063c655a8d6e5975b2527a4e7d24be3e", size = 18281831, upload-time = "2025-05-12T22:02:06.429Z" },
+ { url = "https://files.pythonhosted.org/packages/02/3a/56475a111120d1e5d11939acbcbb17c92198c8e64a205cd68e00bdfd8a1f/onnx-1.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73160799472e1a86083f786fecdf864cf43d55325492a9b5a1cfa64d8a523ecc", size = 17424359, upload-time = "2025-05-12T22:02:09.866Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/03/5eb5e9ef446ed9e78c4627faf3c1bc25e0f707116dd00e9811de232a8df5/onnx-1.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6acafb3823238bbe8f4340c7ac32fb218689442e074d797bee1c5c9a02fdae75", size = 17586006, upload-time = "2025-05-12T22:02:13.217Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/4e/70943125729ce453271a6e46bb847b4a612496f64db6cbc6cb1f49f41ce1/onnx-1.18.0-cp311-cp311-win32.whl", hash = "sha256:4c8c4bbda760c654e65eaffddb1a7de71ec02e60092d33f9000521f897c99be9", size = 15734988, upload-time = "2025-05-12T22:02:16.561Z" },
+ { url = "https://files.pythonhosted.org/packages/44/b0/435fd764011911e8f599e3361f0f33425b1004662c1ea33a0ad22e43db2d/onnx-1.18.0-cp311-cp311-win_amd64.whl", hash = "sha256:a5810194f0f6be2e58c8d6dedc6119510df7a14280dd07ed5f0f0a85bd74816a", size = 15849576, upload-time = "2025-05-12T22:02:19.569Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/f0/9e31f4b4626d60f1c034f71b411810bc9fafe31f4e7dd3598effd1b50e05/onnx-1.18.0-cp311-cp311-win_arm64.whl", hash = "sha256:aa1b7483fac6cdec26922174fc4433f8f5c2f239b1133c5625063bb3b35957d0", size = 15822961, upload-time = "2025-05-12T22:02:22.735Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/fe/16228aca685392a7114625b89aae98b2dc4058a47f0f467a376745efe8d0/onnx-1.18.0-cp312-cp312-macosx_12_0_universal2.whl", hash = "sha256:521bac578448667cbb37c50bf05b53c301243ede8233029555239930996a625b", size = 18285770, upload-time = "2025-05-12T22:02:26.116Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/77/ba50a903a9b5e6f9be0fa50f59eb2fca4a26ee653375408fbc72c3acbf9f/onnx-1.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4da451bf1c5ae381f32d430004a89f0405bc57a8471b0bddb6325a5b334aa40", size = 17421291, upload-time = "2025-05-12T22:02:29.645Z" },
+ { url = "https://files.pythonhosted.org/packages/11/23/25ec2ba723ac62b99e8fed6d7b59094dadb15e38d4c007331cc9ae3dfa5f/onnx-1.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99afac90b4cdb1471432203c3c1f74e16549c526df27056d39f41a9a47cfb4af", size = 17584084, upload-time = "2025-05-12T22:02:32.789Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/4d/2c253a36070fb43f340ff1d2c450df6a9ef50b938adcd105693fee43c4ee/onnx-1.18.0-cp312-cp312-win32.whl", hash = "sha256:ee159b41a3ae58d9c7341cf432fc74b96aaf50bd7bb1160029f657b40dc69715", size = 15734892, upload-time = "2025-05-12T22:02:35.527Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/92/048ba8fafe6b2b9a268ec2fb80def7e66c0b32ab2cae74de886981f05a27/onnx-1.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:102c04edc76b16e9dfeda5a64c1fccd7d3d2913b1544750c01d38f1ac3c04e05", size = 15850336, upload-time = "2025-05-12T22:02:38.545Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/66/bbc4ffedd44165dcc407a51ea4c592802a5391ce3dc94aa5045350f64635/onnx-1.18.0-cp312-cp312-win_arm64.whl", hash = "sha256:911b37d724a5d97396f3c2ef9ea25361c55cbc9aa18d75b12a52b620b67145af", size = 15823802, upload-time = "2025-05-12T22:02:42.037Z" },
+ { url = "https://files.pythonhosted.org/packages/45/da/9fb8824513fae836239276870bfcc433fa2298d34ed282c3a47d3962561b/onnx-1.18.0-cp313-cp313-macosx_12_0_universal2.whl", hash = "sha256:030d9f5f878c5f4c0ff70a4545b90d7812cd6bfe511de2f3e469d3669c8cff95", size = 18285906, upload-time = "2025-05-12T22:02:45.01Z" },
+ { url = "https://files.pythonhosted.org/packages/05/e8/762b5fb5ed1a2b8e9a4bc5e668c82723b1b789c23b74e6b5a3356731ae4e/onnx-1.18.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8521544987d713941ee1e591520044d35e702f73dc87e91e6d4b15a064ae813d", size = 17421486, upload-time = "2025-05-12T22:02:48.467Z" },
+ { url = "https://files.pythonhosted.org/packages/12/bb/471da68df0364f22296456c7f6becebe0a3da1ba435cdb371099f516da6e/onnx-1.18.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c137eecf6bc618c2f9398bcc381474b55c817237992b169dfe728e169549e8f", size = 17583581, upload-time = "2025-05-12T22:02:51.784Z" },
+ { url = "https://files.pythonhosted.org/packages/76/0d/01a95edc2cef6ad916e04e8e1267a9286f15b55c90cce5d3cdeb359d75d6/onnx-1.18.0-cp313-cp313-win32.whl", hash = "sha256:6c093ffc593e07f7e33862824eab9225f86aa189c048dd43ffde207d7041a55f", size = 15734621, upload-time = "2025-05-12T22:02:54.62Z" },
+ { url = "https://files.pythonhosted.org/packages/64/95/253451a751be32b6173a648b68f407188009afa45cd6388780c330ff5d5d/onnx-1.18.0-cp313-cp313-win_amd64.whl", hash = "sha256:230b0fb615e5b798dc4a3718999ec1828360bc71274abd14f915135eab0255f1", size = 15850472, upload-time = "2025-05-12T22:02:57.54Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/b1/6fd41b026836df480a21687076e0f559bc3ceeac90f2be8c64b4a7a1f332/onnx-1.18.0-cp313-cp313-win_arm64.whl", hash = "sha256:6f91930c1a284135db0f891695a263fc876466bf2afbd2215834ac08f600cfca", size = 15823808, upload-time = "2025-05-12T22:03:00.305Z" },
+ { url = "https://files.pythonhosted.org/packages/70/f3/499e53dd41fa7302f914dd18543da01e0786a58b9a9d347497231192001f/onnx-1.18.0-cp313-cp313t-macosx_12_0_universal2.whl", hash = "sha256:2f4d37b0b5c96a873887652d1cbf3f3c70821b8c66302d84b0f0d89dd6e47653", size = 18316526, upload-time = "2025-05-12T22:03:03.691Z" },
+ { url = "https://files.pythonhosted.org/packages/84/dd/6abe5d7bd23f5ed3ade8352abf30dff1c7a9e97fc1b0a17b5d7c726e98a9/onnx-1.18.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a69afd0baa372162948b52c13f3aa2730123381edf926d7ef3f68ca7cec6d0d0", size = 15865055, upload-time = "2025-05-12T22:03:06.663Z" },
+]
+
+[[package]]
+name = "onnxruntime"
+version = "1.22.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "coloredlogs" },
+ { name = "flatbuffers" },
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "packaging" },
+ { name = "protobuf" },
+ { name = "sympy" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/b9/664a1ffee62fa51529fac27b37409d5d28cadee8d97db806fcba68339b7e/onnxruntime-1.22.1-cp310-cp310-macosx_13_0_universal2.whl", hash = "sha256:80e7f51da1f5201c1379b8d6ef6170505cd800e40da216290f5e06be01aadf95", size = 34319864, upload-time = "2025-07-10T19:15:15.371Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/64/bc7221e92c994931024e22b22401b962c299e991558c3d57f7e34538b4b9/onnxruntime-1.22.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89ddfdbbdaf7e3a59515dee657f6515601d55cb21a0f0f48c81aefc54ff1b73", size = 14472246, upload-time = "2025-07-10T19:15:19.403Z" },
+ { url = "https://files.pythonhosted.org/packages/84/57/901eddbfb59ac4d008822b236450d5765cafcd450c787019416f8d3baf11/onnxruntime-1.22.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bddc75868bcf6f9ed76858a632f65f7b1846bdcefc6d637b1e359c2c68609964", size = 16459905, upload-time = "2025-07-10T19:15:21.749Z" },
+ { url = "https://files.pythonhosted.org/packages/de/90/d6a1eb9b47e66a18afe7d1cf7cf0b2ef966ffa6f44d9f32d94c2be2860fb/onnxruntime-1.22.1-cp310-cp310-win_amd64.whl", hash = "sha256:01e2f21b2793eb0c8642d2be3cee34cc7d96b85f45f6615e4e220424158877ce", size = 12689001, upload-time = "2025-07-10T19:15:23.848Z" },
+ { url = "https://files.pythonhosted.org/packages/82/ff/4a1a6747e039ef29a8d4ee4510060e9a805982b6da906a3da2306b7a3be6/onnxruntime-1.22.1-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:f4581bccb786da68725d8eac7c63a8f31a89116b8761ff8b4989dc58b61d49a0", size = 34324148, upload-time = "2025-07-10T19:15:26.584Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/05/9f1929723f1cca8c9fb1b2b97ac54ce61362c7201434d38053ea36ee4225/onnxruntime-1.22.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7ae7526cf10f93454beb0f751e78e5cb7619e3b92f9fc3bd51aa6f3b7a8977e5", size = 14473779, upload-time = "2025-07-10T19:15:30.183Z" },
+ { url = "https://files.pythonhosted.org/packages/59/f3/c93eb4167d4f36ea947930f82850231f7ce0900cb00e1a53dc4995b60479/onnxruntime-1.22.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f6effa1299ac549a05c784d50292e3378dbbf010346ded67400193b09ddc2f04", size = 16460799, upload-time = "2025-07-10T19:15:33.005Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/01/e536397b03e4462d3260aee5387e6f606c8fa9d2b20b1728f988c3c72891/onnxruntime-1.22.1-cp311-cp311-win_amd64.whl", hash = "sha256:f28a42bb322b4ca6d255531bb334a2b3e21f172e37c1741bd5e66bc4b7b61f03", size = 12689881, upload-time = "2025-07-10T19:15:35.501Z" },
+ { url = "https://files.pythonhosted.org/packages/48/70/ca2a4d38a5deccd98caa145581becb20c53684f451e89eb3a39915620066/onnxruntime-1.22.1-cp312-cp312-macosx_13_0_universal2.whl", hash = "sha256:a938d11c0dc811badf78e435daa3899d9af38abee950d87f3ab7430eb5b3cf5a", size = 34342883, upload-time = "2025-07-10T19:15:38.223Z" },
+ { url = "https://files.pythonhosted.org/packages/29/e5/00b099b4d4f6223b610421080d0eed9327ef9986785c9141819bbba0d396/onnxruntime-1.22.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:984cea2a02fcc5dfea44ade9aca9fe0f7a8a2cd6f77c258fc4388238618f3928", size = 14473861, upload-time = "2025-07-10T19:15:42.911Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/50/519828a5292a6ccd8d5cd6d2f72c6b36ea528a2ef68eca69647732539ffa/onnxruntime-1.22.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2d39a530aff1ec8d02e365f35e503193991417788641b184f5b1e8c9a6d5ce8d", size = 16475713, upload-time = "2025-07-10T19:15:45.452Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/54/7139d463bb0a312890c9a5db87d7815d4a8cce9e6f5f28d04f0b55fcb160/onnxruntime-1.22.1-cp312-cp312-win_amd64.whl", hash = "sha256:6a64291d57ea966a245f749eb970f4fa05a64d26672e05a83fdb5db6b7d62f87", size = 12690910, upload-time = "2025-07-10T19:15:47.478Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/39/77cefa829740bd830915095d8408dce6d731b244e24b1f64fe3df9f18e86/onnxruntime-1.22.1-cp313-cp313-macosx_13_0_universal2.whl", hash = "sha256:d29c7d87b6cbed8fecfd09dca471832384d12a69e1ab873e5effbb94adc3e966", size = 34342026, upload-time = "2025-07-10T19:15:50.266Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/a6/444291524cb52875b5de980a6e918072514df63a57a7120bf9dfae3aeed1/onnxruntime-1.22.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:460487d83b7056ba98f1f7bac80287224c31d8149b15712b0d6f5078fcc33d0f", size = 14474014, upload-time = "2025-07-10T19:15:53.991Z" },
+ { url = "https://files.pythonhosted.org/packages/87/9d/45a995437879c18beff26eacc2322f4227224d04c6ac3254dce2e8950190/onnxruntime-1.22.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b0c37070268ba4e02a1a9d28560cd00cd1e94f0d4f275cbef283854f861a65fa", size = 16475427, upload-time = "2025-07-10T19:15:56.067Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/06/9c765e66ad32a7e709ce4cb6b95d7eaa9cb4d92a6e11ea97c20ffecaf765/onnxruntime-1.22.1-cp313-cp313-win_amd64.whl", hash = "sha256:70980d729145a36a05f74b573435531f55ef9503bcda81fc6c3d6b9306199982", size = 12690841, upload-time = "2025-07-10T19:15:58.337Z" },
+ { url = "https://files.pythonhosted.org/packages/52/8c/02af24ee1c8dce4e6c14a1642a7a56cebe323d2fa01d9a360a638f7e4b75/onnxruntime-1.22.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:33a7980bbc4b7f446bac26c3785652fe8730ed02617d765399e89ac7d44e0f7d", size = 14479333, upload-time = "2025-07-10T19:16:00.544Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/15/d75fd66aba116ce3732bb1050401394c5ec52074c4f7ee18db8838dd4667/onnxruntime-1.22.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7e823624b015ea879d976cbef8bfaed2f7e2cc233d7506860a76dd37f8f381", size = 16477261, upload-time = "2025-07-10T19:16:03.226Z" },
+]
+
+[[package]]
+name = "onnxruntime-gpu"
+version = "1.22.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "coloredlogs" },
+ { name = "flatbuffers" },
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "packaging" },
+ { name = "protobuf" },
+ { name = "sympy" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/27/76/81de592072d6a41553b1523e15447f0ef94392e8f4cb98fda42909f24f9b/onnxruntime_gpu-1.22.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:965da7d33a54917e8e5176f292cc22640819f328370f4fb86087908745b03708", size = 283205327, upload-time = "2025-05-09T19:39:24.231Z" },
+ { url = "https://files.pythonhosted.org/packages/74/7b/636cb1e19cf1340e4eaf0da6a4cc10cf2ae56f00693b4ff61c28dd0c7160/onnxruntime_gpu-1.22.0-cp310-cp310-win_amd64.whl", hash = "sha256:6db51c375ffe3887fe5cce61a0ae054e5e9c1eaf0603f8a106589a819976e4b2", size = 214923182, upload-time = "2025-05-09T19:32:35.985Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/10/cd3e7e289f7b46eb93e38b5c90139f735bf1ea7f03d4b17ceb0e998e5bb6/onnxruntime_gpu-1.22.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d30c1512f22b1f01bacb4f177d49cbefd23e0f4bef56066f1282992d133e6ff8", size = 283204403, upload-time = "2025-05-09T19:39:38.278Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/47/313ee7998ef63dd7533200966972056fc5f3c7dd3bdfd9c49ae833bb5108/onnxruntime_gpu-1.22.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f1719f7cca76075b398a7d0466ead62d78fd2b8c0ea053dcf65d80c813103e8", size = 214923507, upload-time = "2025-05-09T19:32:51.275Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/5c/3f9700ba277d52c121dd2cebc8a672fb60b53e888972fc6682b6692a766c/onnxruntime_gpu-1.22.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86b064c8f6cbe6da03f51f46351237d985f8fd5eb907d3f9997ea91881131a13", size = 283199528, upload-time = "2025-05-09T19:39:54.489Z" },
+ { url = "https://files.pythonhosted.org/packages/48/9e/f95af15627c8b9f866f2e372e467a9f1e14e7ebec224ed4b8e71ce970c81/onnxruntime_gpu-1.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:89cfd71e1ba17a4668e8770e344f22cde64bfd70b2ad3d03b8a390d4414b5995", size = 214923964, upload-time = "2025-05-09T19:33:04.028Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/26/35efe9dae012f453f2f7698dec3604368ce91ee2a0464336d2284fe02e3b/onnxruntime_gpu-1.22.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c3e635792931c5edf48a6a44b8daf4f74a9458e2d60245d24d91e29b6c1c7aa5", size = 283205630, upload-time = "2025-05-09T19:40:12.749Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/d8/0063e4973c54d3b39d6b3025a31f80bfda6386fa0eb16fc047f2fe724832/onnxruntime_gpu-1.22.0-cp313-cp313-win_amd64.whl", hash = "sha256:082c9744b0470448a7d814babe058d0b5074380f32839aa655e5e5f9975f6d94", size = 214924126, upload-time = "2025-05-09T19:33:14.647Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/ab/943c659cded9288519c67e6d5827973762207d19035972c703a1fefd032c/onnxruntime_gpu-1.22.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1559033601d71023d72a8e279b2575a104de5f46e136f87534206aa2044eb1c", size = 283210584, upload-time = "2025-05-09T19:40:27.372Z" },
+]
+
+[[package]]
+name = "onnxsim"
+version = "0.4.36"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "onnx" },
+ { name = "rich" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ce/9e/f34238413ebeda9a3a8802feeaa5013934455466b9ab390b48ad9c7e184f/onnxsim-0.4.36.tar.gz", hash = "sha256:6e0ee9d6d4a83042bdef7319fbe58352d9fda5f253386be2b267c7c27f0638ee", size = 20993703, upload-time = "2024-03-04T08:25:00.086Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9b/55/0a0a248636cccccb7d4ed9189446e00017f411d0e13d1dd4af419ee4d529/onnxsim-0.4.36-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:7498e7b9584c4b354b455564dfba66d460ce2c205b71dae169cfa9b6704e03fd", size = 3471278, upload-time = "2024-03-04T08:24:26.988Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/6e/80c77b5c6ec079994295e6e685097fa42732a1e7c5a22fe9c5c4ca1aac74/onnxsim-0.4.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce87837f8975beebdcc98cc01d6d13e84b10900eb2c14035ce1066c3d670d96d", size = 2255237, upload-time = "2024-03-04T08:24:29.047Z" },
+ { url = "https://files.pythonhosted.org/packages/0e/c5/6c93b354684b3fc4b520a23be3e4db5870b35dde9e9e2a1f41018ba369e8/onnxsim-0.4.36-cp310-cp310-win_amd64.whl", hash = "sha256:f92bec8c6c0d4f8463e10021277711d2faac900e4eb890238001b3eadb5c03bc", size = 1288644, upload-time = "2024-03-04T08:24:31.275Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/a3/f6cad8499f375a3acc8a7837721f82860244656cf62984cf80ebe187cc68/onnxsim-0.4.36-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:150b9a3a409af2f3161af3fecda2113e0e6e296fb015b5205a9ddf645765acad", size = 3471242, upload-time = "2024-03-04T08:24:33.289Z" },
+ { url = "https://files.pythonhosted.org/packages/db/94/22aab761b3d416bce02020d9ca98dc692427c2717b0325952e30ce41f83b/onnxsim-0.4.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa7596e6b806ed19077f7652788a50ee576c172b4d16d421f0593aef1a6fa4c4", size = 2255003, upload-time = "2024-03-04T08:24:35.024Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/5c/aa277f45b0d8253027d1ce3269952e116b476985e5fb497e00ebd917ce29/onnxsim-0.4.36-cp311-cp311-win_amd64.whl", hash = "sha256:91fb32def04f2f89d5f76527c852332366957752e5e61ac25be0b2d7bb410f89", size = 1288684, upload-time = "2024-03-04T08:24:37.064Z" },
+]
+
+[[package]]
+name = "opencv-python"
+version = "4.11.0.86"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/17/06/68c27a523103dad5837dc5b87e71285280c4f098c60e4fe8a8db6486ab09/opencv-python-4.11.0.86.tar.gz", hash = "sha256:03d60ccae62304860d232272e4a4fda93c39d595780cb40b161b310244b736a4", size = 95171956, upload-time = "2025-01-16T13:52:24.737Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl", hash = "sha256:432f67c223f1dc2824f5e73cdfcd9db0efc8710647d4e813012195dc9122a52a", size = 37326322, upload-time = "2025-01-16T13:52:25.887Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/84/0a67490741867eacdfa37bc18df96e08a9d579583b419010d7f3da8ff503/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl", hash = "sha256:9d05ef13d23fe97f575153558653e2d6e87103995d54e6a35db3f282fe1f9c66", size = 56723197, upload-time = "2025-01-16T13:55:21.222Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/bd/29c126788da65c1fb2b5fb621b7fed0ed5f9122aa22a0868c5e2c15c6d23/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b92ae2c8852208817e6776ba1ea0d6b1e0a1b5431e971a2a0ddd2a8cc398202", size = 42230439, upload-time = "2025-01-16T13:51:35.822Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b02611523803495003bd87362db3e1d2a0454a6a63025dc6658a9830570aa0d", size = 62986597, upload-time = "2025-01-16T13:52:08.836Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/d7/1d5941a9dde095468b288d989ff6539dd69cd429dbf1b9e839013d21b6f0/opencv_python-4.11.0.86-cp37-abi3-win32.whl", hash = "sha256:810549cb2a4aedaa84ad9a1c92fbfdfc14090e2749cedf2c1589ad8359aa169b", size = 29384337, upload-time = "2025-01-16T13:52:13.549Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/7d/f1c30a92854540bf789e9cd5dde7ef49bbe63f855b85a2e6b3db8135c591/opencv_python-4.11.0.86-cp37-abi3-win_amd64.whl", hash = "sha256:085ad9b77c18853ea66283e98affefe2de8cc4c1f43eda4c100cf9b2721142ec", size = 39488044, upload-time = "2025-01-16T13:52:21.928Z" },
+]
+
+[[package]]
+name = "packaging"
+version = "25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
+]
+
+[[package]]
+name = "paginate"
+version = "0.5.7"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ec/46/68dde5b6bc00c1296ec6466ab27dddede6aec9af1b99090e1107091b3b84/paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945", size = 19252, upload-time = "2024-08-25T14:17:24.139Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/90/96/04b8e52da071d28f5e21a805b19cb9390aa17a47462ac87f5e2696b9566d/paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591", size = 13746, upload-time = "2024-08-25T14:17:22.55Z" },
+]
+
+[[package]]
+name = "pathspec"
+version = "0.12.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, upload-time = "2023-12-10T22:30:45Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" },
+]
+
+[[package]]
+name = "pbr"
+version = "6.1.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "setuptools" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/01/d2/510cc0d218e753ba62a1bc1434651db3cd797a9716a0a66cc714cb4f0935/pbr-6.1.1.tar.gz", hash = "sha256:93ea72ce6989eb2eed99d0f75721474f69ad88128afdef5ac377eb797c4bf76b", size = 125702, upload-time = "2025-02-04T14:28:06.514Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/47/ac/684d71315abc7b1214d59304e23a982472967f6bf4bde5a98f1503f648dc/pbr-6.1.1-py2.py3-none-any.whl", hash = "sha256:38d4daea5d9fa63b3f626131b9d34947fd0c8be9b05a29276870580050a25a76", size = 108997, upload-time = "2025-02-04T14:28:03.168Z" },
+]
+
+[[package]]
+name = "pillow"
+version = "11.3.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4c/5d/45a3553a253ac8763f3561371432a90bdbe6000fbdcf1397ffe502aa206c/pillow-11.3.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b9c17fd4ace828b3003dfd1e30bff24863e0eb59b535e8f80194d9cc7ecf860", size = 5316554, upload-time = "2025-07-01T09:13:39.342Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/c8/67c12ab069ef586a25a4a79ced553586748fad100c77c0ce59bb4983ac98/pillow-11.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:65dc69160114cdd0ca0f35cb434633c75e8e7fad4cf855177a05bf38678f73ad", size = 4686548, upload-time = "2025-07-01T09:13:41.835Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/bd/6741ebd56263390b382ae4c5de02979af7f8bd9807346d068700dd6d5cf9/pillow-11.3.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7107195ddc914f656c7fc8e4a5e1c25f32e9236ea3ea860f257b0436011fddd0", size = 5859742, upload-time = "2025-07-03T13:09:47.439Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/0b/c412a9e27e1e6a829e6ab6c2dca52dd563efbedf4c9c6aa453d9a9b77359/pillow-11.3.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cc3e831b563b3114baac7ec2ee86819eb03caa1a2cef0b481a5675b59c4fe23b", size = 7633087, upload-time = "2025-07-03T13:09:51.796Z" },
+ { url = "https://files.pythonhosted.org/packages/59/9d/9b7076aaf30f5dd17e5e5589b2d2f5a5d7e30ff67a171eb686e4eecc2adf/pillow-11.3.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f1f182ebd2303acf8c380a54f615ec883322593320a9b00438eb842c1f37ae50", size = 5963350, upload-time = "2025-07-01T09:13:43.865Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/16/1a6bf01fb622fb9cf5c91683823f073f053005c849b1f52ed613afcf8dae/pillow-11.3.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4445fa62e15936a028672fd48c4c11a66d641d2c05726c7ec1f8ba6a572036ae", size = 6631840, upload-time = "2025-07-01T09:13:46.161Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/e6/6ff7077077eb47fde78739e7d570bdcd7c10495666b6afcd23ab56b19a43/pillow-11.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:71f511f6b3b91dd543282477be45a033e4845a40278fa8dcdbfdb07109bf18f9", size = 6074005, upload-time = "2025-07-01T09:13:47.829Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/3a/b13f36832ea6d279a697231658199e0a03cd87ef12048016bdcc84131601/pillow-11.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040a5b691b0713e1f6cbe222e0f4f74cd233421e105850ae3b3c0ceda520f42e", size = 6708372, upload-time = "2025-07-01T09:13:52.145Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/e4/61b2e1a7528740efbc70b3d581f33937e38e98ef3d50b05007267a55bcb2/pillow-11.3.0-cp310-cp310-win32.whl", hash = "sha256:89bd777bc6624fe4115e9fac3352c79ed60f3bb18651420635f26e643e3dd1f6", size = 6277090, upload-time = "2025-07-01T09:13:53.915Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/d3/60c781c83a785d6afbd6a326ed4d759d141de43aa7365725cbcd65ce5e54/pillow-11.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:19d2ff547c75b8e3ff46f4d9ef969a06c30ab2d4263a9e287733aa8b2429ce8f", size = 6985988, upload-time = "2025-07-01T09:13:55.699Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/28/4f4a0203165eefb3763939c6789ba31013a2e90adffb456610f30f613850/pillow-11.3.0-cp310-cp310-win_arm64.whl", hash = "sha256:819931d25e57b513242859ce1876c58c59dc31587847bf74cfe06b2e0cb22d2f", size = 2422899, upload-time = "2025-07-01T09:13:57.497Z" },
+ { url = "https://files.pythonhosted.org/packages/db/26/77f8ed17ca4ffd60e1dcd220a6ec6d71210ba398cfa33a13a1cd614c5613/pillow-11.3.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1cd110edf822773368b396281a2293aeb91c90a2db00d78ea43e7e861631b722", size = 5316531, upload-time = "2025-07-01T09:13:59.203Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/39/ee475903197ce709322a17a866892efb560f57900d9af2e55f86db51b0a5/pillow-11.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9c412fddd1b77a75aa904615ebaa6001f169b26fd467b4be93aded278266b288", size = 4686560, upload-time = "2025-07-01T09:14:01.101Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/90/442068a160fd179938ba55ec8c97050a612426fae5ec0a764e345839f76d/pillow-11.3.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1aa4de119a0ecac0a34a9c8bde33f34022e2e8f99104e47a3ca392fd60e37d", size = 5870978, upload-time = "2025-07-03T13:09:55.638Z" },
+ { url = "https://files.pythonhosted.org/packages/13/92/dcdd147ab02daf405387f0218dcf792dc6dd5b14d2573d40b4caeef01059/pillow-11.3.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:91da1d88226663594e3f6b4b8c3c8d85bd504117d043740a8e0ec449087cc494", size = 7641168, upload-time = "2025-07-03T13:10:00.37Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/db/839d6ba7fd38b51af641aa904e2960e7a5644d60ec754c046b7d2aee00e5/pillow-11.3.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:643f189248837533073c405ec2f0bb250ba54598cf80e8c1e043381a60632f58", size = 5973053, upload-time = "2025-07-01T09:14:04.491Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/2f/d7675ecae6c43e9f12aa8d58b6012683b20b6edfbdac7abcb4e6af7a3784/pillow-11.3.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:106064daa23a745510dabce1d84f29137a37224831d88eb4ce94bb187b1d7e5f", size = 6640273, upload-time = "2025-07-01T09:14:06.235Z" },
+ { url = "https://files.pythonhosted.org/packages/45/ad/931694675ede172e15b2ff03c8144a0ddaea1d87adb72bb07655eaffb654/pillow-11.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cd8ff254faf15591e724dc7c4ddb6bf4793efcbe13802a4ae3e863cd300b493e", size = 6082043, upload-time = "2025-07-01T09:14:07.978Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/04/ba8f2b11fc80d2dd462d7abec16351b45ec99cbbaea4387648a44190351a/pillow-11.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:932c754c2d51ad2b2271fd01c3d121daaa35e27efae2a616f77bf164bc0b3e94", size = 6715516, upload-time = "2025-07-01T09:14:10.233Z" },
+ { url = "https://files.pythonhosted.org/packages/48/59/8cd06d7f3944cc7d892e8533c56b0acb68399f640786313275faec1e3b6f/pillow-11.3.0-cp311-cp311-win32.whl", hash = "sha256:b4b8f3efc8d530a1544e5962bd6b403d5f7fe8b9e08227c6b255f98ad82b4ba0", size = 6274768, upload-time = "2025-07-01T09:14:11.921Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/cc/29c0f5d64ab8eae20f3232da8f8571660aa0ab4b8f1331da5c2f5f9a938e/pillow-11.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:1a992e86b0dd7aeb1f053cd506508c0999d710a8f07b4c791c63843fc6a807ac", size = 6986055, upload-time = "2025-07-01T09:14:13.623Z" },
+ { url = "https://files.pythonhosted.org/packages/c6/df/90bd886fabd544c25addd63e5ca6932c86f2b701d5da6c7839387a076b4a/pillow-11.3.0-cp311-cp311-win_arm64.whl", hash = "sha256:30807c931ff7c095620fe04448e2c2fc673fcbb1ffe2a7da3fb39613489b1ddd", size = 2423079, upload-time = "2025-07-01T09:14:15.268Z" },
+ { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" },
+ { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" },
+ { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" },
+ { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" },
+ { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" },
+ { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" },
+ { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" },
+ { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" },
+ { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" },
+ { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" },
+ { url = "https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" },
+ { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" },
+ { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" },
+ { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" },
+ { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" },
+ { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" },
+ { url = "https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" },
+ { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" },
+ { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" },
+ { url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" },
+ { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" },
+ { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/8b/209bd6b62ce8367f47e68a218bffac88888fdf2c9fcf1ecadc6c3ec1ebc7/pillow-11.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:3cee80663f29e3843b68199b9d6f4f54bd1d4a6b59bdd91bceefc51238bcb967", size = 5270556, upload-time = "2025-07-01T09:16:09.961Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/e6/231a0b76070c2cfd9e260a7a5b504fb72da0a95279410fa7afd99d9751d6/pillow-11.3.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b5f56c3f344f2ccaf0dd875d3e180f631dc60a51b314295a3e681fe8cf851fbe", size = 4654625, upload-time = "2025-07-01T09:16:11.913Z" },
+ { url = "https://files.pythonhosted.org/packages/13/f4/10cf94fda33cb12765f2397fc285fa6d8eb9c29de7f3185165b702fc7386/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e67d793d180c9df62f1f40aee3accca4829d3794c95098887edc18af4b8b780c", size = 4874207, upload-time = "2025-07-03T13:11:10.201Z" },
+ { url = "https://files.pythonhosted.org/packages/72/c9/583821097dc691880c92892e8e2d41fe0a5a3d6021f4963371d2f6d57250/pillow-11.3.0-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d000f46e2917c705e9fb93a3606ee4a819d1e3aa7a9b442f6444f07e77cf5e25", size = 6583939, upload-time = "2025-07-03T13:11:15.68Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/8e/5c9d410f9217b12320efc7c413e72693f48468979a013ad17fd690397b9a/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:527b37216b6ac3a12d7838dc3bd75208ec57c1c6d11ef01902266a5a0c14fc27", size = 4957166, upload-time = "2025-07-01T09:16:13.74Z" },
+ { url = "https://files.pythonhosted.org/packages/62/bb/78347dbe13219991877ffb3a91bf09da8317fbfcd4b5f9140aeae020ad71/pillow-11.3.0-pp310-pypy310_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:be5463ac478b623b9dd3937afd7fb7ab3d79dd290a28e2b6df292dc75063eb8a", size = 5581482, upload-time = "2025-07-01T09:16:16.107Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/28/1000353d5e61498aaeaaf7f1e4b49ddb05f2c6575f9d4f9f914a3538b6e1/pillow-11.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:8dc70ca24c110503e16918a658b869019126ecfe03109b754c402daff12b3d9f", size = 6984596, upload-time = "2025-07-01T09:16:18.07Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/e3/6fa84033758276fb31da12e5fb66ad747ae83b93c67af17f8c6ff4cc8f34/pillow-11.3.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7c8ec7a017ad1bd562f93dbd8505763e688d388cde6e4a010ae1486916e713e6", size = 5270566, upload-time = "2025-07-01T09:16:19.801Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/ee/e8d2e1ab4892970b561e1ba96cbd59c0d28cf66737fc44abb2aec3795a4e/pillow-11.3.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:9ab6ae226de48019caa8074894544af5b53a117ccb9d3b3dcb2871464c829438", size = 4654618, upload-time = "2025-07-01T09:16:21.818Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/6d/17f80f4e1f0761f02160fc433abd4109fa1548dcfdca46cfdadaf9efa565/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fe27fb049cdcca11f11a7bfda64043c37b30e6b91f10cb5bab275806c32f6ab3", size = 4874248, upload-time = "2025-07-03T13:11:20.738Z" },
+ { url = "https://files.pythonhosted.org/packages/de/5f/c22340acd61cef960130585bbe2120e2fd8434c214802f07e8c03596b17e/pillow-11.3.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:465b9e8844e3c3519a983d58b80be3f668e2a7a5db97f2784e7079fbc9f9822c", size = 6583963, upload-time = "2025-07-03T13:11:26.283Z" },
+ { url = "https://files.pythonhosted.org/packages/31/5e/03966aedfbfcbb4d5f8aa042452d3361f325b963ebbadddac05b122e47dd/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5418b53c0d59b3824d05e029669efa023bbef0f3e92e75ec8428f3799487f361", size = 4957170, upload-time = "2025-07-01T09:16:23.762Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/2d/e082982aacc927fc2cab48e1e731bdb1643a1406acace8bed0900a61464e/pillow-11.3.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:504b6f59505f08ae014f724b6207ff6222662aab5cc9542577fb084ed0676ac7", size = 5581505, upload-time = "2025-07-01T09:16:25.593Z" },
+ { url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598, upload-time = "2025-07-01T09:16:27.732Z" },
+]
+
+[[package]]
+name = "platformdirs"
+version = "4.3.8"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" },
+]
+
+[[package]]
+name = "pluggy"
+version = "1.6.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
+]
+
+[[package]]
+name = "pre-commit"
+version = "4.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cfgv" },
+ { name = "identify" },
+ { name = "nodeenv" },
+ { name = "pyyaml" },
+ { name = "virtualenv" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ff/29/7cf5bbc236333876e4b41f56e06857a87937ce4bf91e117a6991a2dbb02a/pre_commit-4.3.0.tar.gz", hash = "sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16", size = 193792, upload-time = "2025-08-09T18:56:14.651Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5b/a5/987a405322d78a73b66e39e4a90e4ef156fd7141bf71df987e50717c321b/pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8", size = 220965, upload-time = "2025-08-09T18:56:13.192Z" },
+]
+
+[[package]]
+name = "propcache"
+version = "0.3.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ab/14/510deed325e262afeb8b360043c5d7c960da7d3ecd6d6f9496c9c56dc7f4/propcache-0.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:22d9962a358aedbb7a2e36187ff273adeaab9743373a272976d2e348d08c7770", size = 73178, upload-time = "2025-06-09T22:53:40.126Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/4e/ad52a7925ff01c1325653a730c7ec3175a23f948f08626a534133427dcff/propcache-0.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0d0fda578d1dc3f77b6b5a5dce3b9ad69a8250a891760a548df850a5e8da87f3", size = 43133, upload-time = "2025-06-09T22:53:41.965Z" },
+ { url = "https://files.pythonhosted.org/packages/63/7c/e9399ba5da7780871db4eac178e9c2e204c23dd3e7d32df202092a1ed400/propcache-0.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3def3da3ac3ce41562d85db655d18ebac740cb3fa4367f11a52b3da9d03a5cc3", size = 43039, upload-time = "2025-06-09T22:53:43.268Z" },
+ { url = "https://files.pythonhosted.org/packages/22/e1/58da211eb8fdc6fc854002387d38f415a6ca5f5c67c1315b204a5d3e9d7a/propcache-0.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bec58347a5a6cebf239daba9bda37dffec5b8d2ce004d9fe4edef3d2815137e", size = 201903, upload-time = "2025-06-09T22:53:44.872Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/0a/550ea0f52aac455cb90111c8bab995208443e46d925e51e2f6ebdf869525/propcache-0.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55ffda449a507e9fbd4aca1a7d9aa6753b07d6166140e5a18d2ac9bc49eac220", size = 213362, upload-time = "2025-06-09T22:53:46.707Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/af/9893b7d878deda9bb69fcf54600b247fba7317761b7db11fede6e0f28bd0/propcache-0.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64a67fb39229a8a8491dd42f864e5e263155e729c2e7ff723d6e25f596b1e8cb", size = 210525, upload-time = "2025-06-09T22:53:48.547Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/bb/38fd08b278ca85cde36d848091ad2b45954bc5f15cce494bb300b9285831/propcache-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da1cf97b92b51253d5b68cf5a2b9e0dafca095e36b7f2da335e27dc6172a614", size = 198283, upload-time = "2025-06-09T22:53:50.067Z" },
+ { url = "https://files.pythonhosted.org/packages/78/8c/9fe55bd01d362bafb413dfe508c48753111a1e269737fa143ba85693592c/propcache-0.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5f559e127134b07425134b4065be45b166183fdcb433cb6c24c8e4149056ad50", size = 191872, upload-time = "2025-06-09T22:53:51.438Z" },
+ { url = "https://files.pythonhosted.org/packages/54/14/4701c33852937a22584e08abb531d654c8bcf7948a8f87ad0a4822394147/propcache-0.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:aff2e4e06435d61f11a428360a932138d0ec288b0a31dd9bd78d200bd4a2b339", size = 199452, upload-time = "2025-06-09T22:53:53.229Z" },
+ { url = "https://files.pythonhosted.org/packages/16/44/447f2253d859602095356007657ee535e0093215ea0b3d1d6a41d16e5201/propcache-0.3.2-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:4927842833830942a5d0a56e6f4839bc484785b8e1ce8d287359794818633ba0", size = 191567, upload-time = "2025-06-09T22:53:54.541Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/b3/e4756258749bb2d3b46defcff606a2f47410bab82be5824a67e84015b267/propcache-0.3.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6107ddd08b02654a30fb8ad7a132021759d750a82578b94cd55ee2772b6ebea2", size = 193015, upload-time = "2025-06-09T22:53:56.44Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/df/e6d3c7574233164b6330b9fd697beeac402afd367280e6dc377bb99b43d9/propcache-0.3.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:70bd8b9cd6b519e12859c99f3fc9a93f375ebd22a50296c3a295028bea73b9e7", size = 204660, upload-time = "2025-06-09T22:53:57.839Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/53/e4d31dd5170b4a0e2e6b730f2385a96410633b4833dc25fe5dffd1f73294/propcache-0.3.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2183111651d710d3097338dd1893fcf09c9f54e27ff1a8795495a16a469cc90b", size = 206105, upload-time = "2025-06-09T22:53:59.638Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/fe/74d54cf9fbe2a20ff786e5f7afcfde446588f0cf15fb2daacfbc267b866c/propcache-0.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fb075ad271405dcad8e2a7ffc9a750a3bf70e533bd86e89f0603e607b93aa64c", size = 196980, upload-time = "2025-06-09T22:54:01.071Z" },
+ { url = "https://files.pythonhosted.org/packages/22/ec/c469c9d59dada8a7679625e0440b544fe72e99311a4679c279562051f6fc/propcache-0.3.2-cp310-cp310-win32.whl", hash = "sha256:404d70768080d3d3bdb41d0771037da19d8340d50b08e104ca0e7f9ce55fce70", size = 37679, upload-time = "2025-06-09T22:54:03.003Z" },
+ { url = "https://files.pythonhosted.org/packages/38/35/07a471371ac89d418f8d0b699c75ea6dca2041fbda360823de21f6a9ce0a/propcache-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:7435d766f978b4ede777002e6b3b6641dd229cd1da8d3d3106a45770365f9ad9", size = 41459, upload-time = "2025-06-09T22:54:04.134Z" },
+ { url = "https://files.pythonhosted.org/packages/80/8d/e8b436717ab9c2cfc23b116d2c297305aa4cd8339172a456d61ebf5669b8/propcache-0.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0b8d2f607bd8f80ddc04088bc2a037fdd17884a6fcadc47a96e334d72f3717be", size = 74207, upload-time = "2025-06-09T22:54:05.399Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/29/1e34000e9766d112171764b9fa3226fa0153ab565d0c242c70e9945318a7/propcache-0.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06766d8f34733416e2e34f46fea488ad5d60726bb9481d3cddf89a6fa2d9603f", size = 43648, upload-time = "2025-06-09T22:54:08.023Z" },
+ { url = "https://files.pythonhosted.org/packages/46/92/1ad5af0df781e76988897da39b5f086c2bf0f028b7f9bd1f409bb05b6874/propcache-0.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2dc1f4a1df4fecf4e6f68013575ff4af84ef6f478fe5344317a65d38a8e6dc9", size = 43496, upload-time = "2025-06-09T22:54:09.228Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/ce/e96392460f9fb68461fabab3e095cb00c8ddf901205be4eae5ce246e5b7e/propcache-0.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be29c4f4810c5789cf10ddf6af80b041c724e629fa51e308a7a0fb19ed1ef7bf", size = 217288, upload-time = "2025-06-09T22:54:10.466Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/2a/866726ea345299f7ceefc861a5e782b045545ae6940851930a6adaf1fca6/propcache-0.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59d61f6970ecbd8ff2e9360304d5c8876a6abd4530cb752c06586849ac8a9dc9", size = 227456, upload-time = "2025-06-09T22:54:11.828Z" },
+ { url = "https://files.pythonhosted.org/packages/de/03/07d992ccb6d930398689187e1b3c718339a1c06b8b145a8d9650e4726166/propcache-0.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:62180e0b8dbb6b004baec00a7983e4cc52f5ada9cd11f48c3528d8cfa7b96a66", size = 225429, upload-time = "2025-06-09T22:54:13.823Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/e6/116ba39448753b1330f48ab8ba927dcd6cf0baea8a0ccbc512dfb49ba670/propcache-0.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c144ca294a204c470f18cf4c9d78887810d04a3e2fbb30eea903575a779159df", size = 213472, upload-time = "2025-06-09T22:54:15.232Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/85/f01f5d97e54e428885a5497ccf7f54404cbb4f906688a1690cd51bf597dc/propcache-0.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5c2a784234c28854878d68978265617aa6dc0780e53d44b4d67f3651a17a9a2", size = 204480, upload-time = "2025-06-09T22:54:17.104Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/79/7bf5ab9033b8b8194cc3f7cf1aaa0e9c3256320726f64a3e1f113a812dce/propcache-0.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5745bc7acdafa978ca1642891b82c19238eadc78ba2aaa293c6863b304e552d7", size = 214530, upload-time = "2025-06-09T22:54:18.512Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0b/bd3e0c00509b609317df4a18e6b05a450ef2d9a963e1d8bc9c9415d86f30/propcache-0.3.2-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:c0075bf773d66fa8c9d41f66cc132ecc75e5bb9dd7cce3cfd14adc5ca184cb95", size = 205230, upload-time = "2025-06-09T22:54:19.947Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/23/fae0ff9b54b0de4e819bbe559508da132d5683c32d84d0dc2ccce3563ed4/propcache-0.3.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5f57aa0847730daceff0497f417c9de353c575d8da3579162cc74ac294c5369e", size = 206754, upload-time = "2025-06-09T22:54:21.716Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/7f/ad6a3c22630aaa5f618b4dc3c3598974a72abb4c18e45a50b3cdd091eb2f/propcache-0.3.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:eef914c014bf72d18efb55619447e0aecd5fb7c2e3fa7441e2e5d6099bddff7e", size = 218430, upload-time = "2025-06-09T22:54:23.17Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/2c/ba4f1c0e8a4b4c75910742f0d333759d441f65a1c7f34683b4a74c0ee015/propcache-0.3.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2a4092e8549031e82facf3decdbc0883755d5bbcc62d3aea9d9e185549936dcf", size = 223884, upload-time = "2025-06-09T22:54:25.539Z" },
+ { url = "https://files.pythonhosted.org/packages/88/e4/ebe30fc399e98572019eee82ad0caf512401661985cbd3da5e3140ffa1b0/propcache-0.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:85871b050f174bc0bfb437efbdb68aaf860611953ed12418e4361bc9c392749e", size = 211480, upload-time = "2025-06-09T22:54:26.892Z" },
+ { url = "https://files.pythonhosted.org/packages/96/0a/7d5260b914e01d1d0906f7f38af101f8d8ed0dc47426219eeaf05e8ea7c2/propcache-0.3.2-cp311-cp311-win32.whl", hash = "sha256:36c8d9b673ec57900c3554264e630d45980fd302458e4ac801802a7fd2ef7897", size = 37757, upload-time = "2025-06-09T22:54:28.241Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/2d/89fe4489a884bc0da0c3278c552bd4ffe06a1ace559db5ef02ef24ab446b/propcache-0.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53af8cb6a781b02d2ea079b5b853ba9430fcbe18a8e3ce647d5982a3ff69f39", size = 41500, upload-time = "2025-06-09T22:54:29.4Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" },
+ { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" },
+ { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" },
+ { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" },
+ { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" },
+ { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" },
+ { url = "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" },
+ { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" },
+ { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" },
+ { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" },
+ { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = "2025-06-09T22:55:07.792Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" },
+ { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" },
+ { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" },
+ { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" },
+ { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" },
+]
+
+[[package]]
+name = "protobuf"
+version = "6.31.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/52/f3/b9655a711b32c19720253f6f06326faf90580834e2e83f840472d752bc8b/protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a", size = 441797, upload-time = "2025-05-28T19:25:54.947Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f3/6f/6ab8e4bf962fd5570d3deaa2d5c38f0a363f57b4501047b5ebeb83ab1125/protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9", size = 423603, upload-time = "2025-05-28T19:25:41.198Z" },
+ { url = "https://files.pythonhosted.org/packages/44/3a/b15c4347dd4bf3a1b0ee882f384623e2063bb5cf9fa9d57990a4f7df2fb6/protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447", size = 435283, upload-time = "2025-05-28T19:25:44.275Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/c9/b9689a2a250264a84e66c46d8862ba788ee7a641cdca39bccf64f59284b7/protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402", size = 425604, upload-time = "2025-05-28T19:25:45.702Z" },
+ { url = "https://files.pythonhosted.org/packages/76/a1/7a5a94032c83375e4fe7e7f56e3976ea6ac90c5e85fac8576409e25c39c3/protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39", size = 322115, upload-time = "2025-05-28T19:25:47.128Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/b1/b59d405d64d31999244643d88c45c8241c58f17cc887e73bcb90602327f8/protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6", size = 321070, upload-time = "2025-05-28T19:25:50.036Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/af/ab3c51ab7507a7325e98ffe691d9495ee3d3aa5f589afad65ec920d39821/protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e", size = 168724, upload-time = "2025-05-28T19:25:53.926Z" },
+]
+
+[[package]]
+name = "psutil"
+version = "7.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" },
+ { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = "2025-02-13T21:54:21.811Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" },
+ { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" },
+ { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" },
+]
+
+[[package]]
+name = "py-cpuinfo"
+version = "9.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690", size = 104716, upload-time = "2022-10-25T20:38:06.303Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5", size = 22335, upload-time = "2022-10-25T20:38:27.636Z" },
+]
+
+[[package]]
+name = "pycparser"
+version = "2.22"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" },
+]
+
+[[package]]
+name = "pydantic"
+version = "2.11.7"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "annotated-types" },
+ { name = "pydantic-core" },
+ { name = "typing-extensions" },
+ { name = "typing-inspection" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" },
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.33.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e5/92/b31726561b5dae176c2d2c2dc43a9c5bfba5d32f96f8b4c0a600dd492447/pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8", size = 2028817, upload-time = "2025-04-23T18:30:43.919Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/44/3f0b95fafdaca04a483c4e685fe437c6891001bf3ce8b2fded82b9ea3aa1/pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d", size = 1861357, upload-time = "2025-04-23T18:30:46.372Z" },
+ { url = "https://files.pythonhosted.org/packages/30/97/e8f13b55766234caae05372826e8e4b3b96e7b248be3157f53237682e43c/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d", size = 1898011, upload-time = "2025-04-23T18:30:47.591Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/a3/99c48cf7bafc991cc3ee66fd544c0aae8dc907b752f1dad2d79b1b5a471f/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572", size = 1982730, upload-time = "2025-04-23T18:30:49.328Z" },
+ { url = "https://files.pythonhosted.org/packages/de/8e/a5b882ec4307010a840fb8b58bd9bf65d1840c92eae7534c7441709bf54b/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02", size = 2136178, upload-time = "2025-04-23T18:30:50.907Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/bb/71e35fc3ed05af6834e890edb75968e2802fe98778971ab5cba20a162315/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b", size = 2736462, upload-time = "2025-04-23T18:30:52.083Z" },
+ { url = "https://files.pythonhosted.org/packages/31/0d/c8f7593e6bc7066289bbc366f2235701dcbebcd1ff0ef8e64f6f239fb47d/pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2", size = 2005652, upload-time = "2025-04-23T18:30:53.389Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/7a/996d8bd75f3eda405e3dd219ff5ff0a283cd8e34add39d8ef9157e722867/pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a", size = 2113306, upload-time = "2025-04-23T18:30:54.661Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/84/daf2a6fb2db40ffda6578a7e8c5a6e9c8affb251a05c233ae37098118788/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac", size = 2073720, upload-time = "2025-04-23T18:30:56.11Z" },
+ { url = "https://files.pythonhosted.org/packages/77/fb/2258da019f4825128445ae79456a5499c032b55849dbd5bed78c95ccf163/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a", size = 2244915, upload-time = "2025-04-23T18:30:57.501Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/7a/925ff73756031289468326e355b6fa8316960d0d65f8b5d6b3a3e7866de7/pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b", size = 2241884, upload-time = "2025-04-23T18:30:58.867Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/b0/249ee6d2646f1cdadcb813805fe76265745c4010cf20a8eba7b0e639d9b2/pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22", size = 1910496, upload-time = "2025-04-23T18:31:00.078Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ff/172ba8f12a42d4b552917aa65d1f2328990d3ccfc01d5b7c943ec084299f/pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640", size = 1955019, upload-time = "2025-04-23T18:31:01.335Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/8d/71db63483d518cbbf290261a1fc2839d17ff89fce7089e08cad07ccfce67/pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7", size = 2028584, upload-time = "2025-04-23T18:31:03.106Z" },
+ { url = "https://files.pythonhosted.org/packages/24/2f/3cfa7244ae292dd850989f328722d2aef313f74ffc471184dc509e1e4e5a/pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246", size = 1855071, upload-time = "2025-04-23T18:31:04.621Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/d3/4ae42d33f5e3f50dd467761304be2fa0a9417fbf09735bc2cce003480f2a/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f", size = 1897823, upload-time = "2025-04-23T18:31:06.377Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/f3/aa5976e8352b7695ff808599794b1fba2a9ae2ee954a3426855935799488/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc", size = 1983792, upload-time = "2025-04-23T18:31:07.93Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/7a/cda9b5a23c552037717f2b2a5257e9b2bfe45e687386df9591eff7b46d28/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de", size = 2136338, upload-time = "2025-04-23T18:31:09.283Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/9f/b8f9ec8dd1417eb9da784e91e1667d58a2a4a7b7b34cf4af765ef663a7e5/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a", size = 2730998, upload-time = "2025-04-23T18:31:11.7Z" },
+ { url = "https://files.pythonhosted.org/packages/47/bc/cd720e078576bdb8255d5032c5d63ee5c0bf4b7173dd955185a1d658c456/pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef", size = 2003200, upload-time = "2025-04-23T18:31:13.536Z" },
+ { url = "https://files.pythonhosted.org/packages/ca/22/3602b895ee2cd29d11a2b349372446ae9727c32e78a94b3d588a40fdf187/pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e", size = 2113890, upload-time = "2025-04-23T18:31:15.011Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/e6/e3c5908c03cf00d629eb38393a98fccc38ee0ce8ecce32f69fc7d7b558a7/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d", size = 2073359, upload-time = "2025-04-23T18:31:16.393Z" },
+ { url = "https://files.pythonhosted.org/packages/12/e7/6a36a07c59ebefc8777d1ffdaf5ae71b06b21952582e4b07eba88a421c79/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30", size = 2245883, upload-time = "2025-04-23T18:31:17.892Z" },
+ { url = "https://files.pythonhosted.org/packages/16/3f/59b3187aaa6cc0c1e6616e8045b284de2b6a87b027cce2ffcea073adf1d2/pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf", size = 2241074, upload-time = "2025-04-23T18:31:19.205Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/ed/55532bb88f674d5d8f67ab121a2a13c385df382de2a1677f30ad385f7438/pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51", size = 1910538, upload-time = "2025-04-23T18:31:20.541Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/1b/25b7cccd4519c0b23c2dd636ad39d381abf113085ce4f7bec2b0dc755eb1/pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab", size = 1952909, upload-time = "2025-04-23T18:31:22.371Z" },
+ { url = "https://files.pythonhosted.org/packages/49/a9/d809358e49126438055884c4366a1f6227f0f84f635a9014e2deb9b9de54/pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65", size = 1897786, upload-time = "2025-04-23T18:31:24.161Z" },
+ { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" },
+ { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" },
+ { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" },
+ { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" },
+ { url = "https://files.pythonhosted.org/packages/46/8c/99040727b41f56616573a28771b1bfa08a3d3fe74d3d513f01251f79f172/pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f", size = 2015688, upload-time = "2025-04-23T18:31:53.175Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/cc/5999d1eb705a6cefc31f0b4a90e9f7fc400539b1a1030529700cc1b51838/pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6", size = 1844808, upload-time = "2025-04-23T18:31:54.79Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/5e/a0a7b8885c98889a18b6e376f344da1ef323d270b44edf8174d6bce4d622/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef", size = 1885580, upload-time = "2025-04-23T18:31:57.393Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/2a/953581f343c7d11a304581156618c3f592435523dd9d79865903272c256a/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a", size = 1973859, upload-time = "2025-04-23T18:31:59.065Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/55/f1a813904771c03a3f97f676c62cca0c0a4138654107c1b61f19c644868b/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916", size = 2120810, upload-time = "2025-04-23T18:32:00.78Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/c3/053389835a996e18853ba107a63caae0b9deb4a276c6b472931ea9ae6e48/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a", size = 2676498, upload-time = "2025-04-23T18:32:02.418Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/3c/f4abd740877a35abade05e437245b192f9d0ffb48bbbbd708df33d3cda37/pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d", size = 2000611, upload-time = "2025-04-23T18:32:04.152Z" },
+ { url = "https://files.pythonhosted.org/packages/59/a7/63ef2fed1837d1121a894d0ce88439fe3e3b3e48c7543b2a4479eb99c2bd/pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56", size = 2107924, upload-time = "2025-04-23T18:32:06.129Z" },
+ { url = "https://files.pythonhosted.org/packages/04/8f/2551964ef045669801675f1cfc3b0d74147f4901c3ffa42be2ddb1f0efc4/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5", size = 2063196, upload-time = "2025-04-23T18:32:08.178Z" },
+ { url = "https://files.pythonhosted.org/packages/26/bd/d9602777e77fc6dbb0c7db9ad356e9a985825547dce5ad1d30ee04903918/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e", size = 2236389, upload-time = "2025-04-23T18:32:10.242Z" },
+ { url = "https://files.pythonhosted.org/packages/42/db/0e950daa7e2230423ab342ae918a794964b053bec24ba8af013fc7c94846/pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162", size = 2239223, upload-time = "2025-04-23T18:32:12.382Z" },
+ { url = "https://files.pythonhosted.org/packages/58/4d/4f937099c545a8a17eb52cb67fe0447fd9a373b348ccfa9a87f141eeb00f/pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849", size = 1900473, upload-time = "2025-04-23T18:32:14.034Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/75/4a0a9bac998d78d889def5e4ef2b065acba8cae8c93696906c3a91f310ca/pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9", size = 1955269, upload-time = "2025-04-23T18:32:15.783Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/86/1beda0576969592f1497b4ce8e7bc8cbdf614c352426271b1b10d5f0aa64/pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9", size = 1893921, upload-time = "2025-04-23T18:32:18.473Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/7d/e09391c2eebeab681df2b74bfe6c43422fffede8dc74187b2b0bf6fd7571/pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac", size = 1806162, upload-time = "2025-04-23T18:32:20.188Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/3d/847b6b1fed9f8ed3bb95a9ad04fbd0b212e832d4f0f50ff4d9ee5a9f15cf/pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5", size = 1981560, upload-time = "2025-04-23T18:32:22.354Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/9a/e73262f6c6656262b5fdd723ad90f518f579b7bc8622e43a942eec53c938/pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9", size = 1935777, upload-time = "2025-04-23T18:32:25.088Z" },
+ { url = "https://files.pythonhosted.org/packages/30/68/373d55e58b7e83ce371691f6eaa7175e3a24b956c44628eb25d7da007917/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa", size = 2023982, upload-time = "2025-04-23T18:32:53.14Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/16/145f54ac08c96a63d8ed6442f9dec17b2773d19920b627b18d4f10a061ea/pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29", size = 1858412, upload-time = "2025-04-23T18:32:55.52Z" },
+ { url = "https://files.pythonhosted.org/packages/41/b1/c6dc6c3e2de4516c0bb2c46f6a373b91b5660312342a0cf5826e38ad82fa/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d", size = 1892749, upload-time = "2025-04-23T18:32:57.546Z" },
+ { url = "https://files.pythonhosted.org/packages/12/73/8cd57e20afba760b21b742106f9dbdfa6697f1570b189c7457a1af4cd8a0/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e", size = 2067527, upload-time = "2025-04-23T18:32:59.771Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/d5/0bb5d988cc019b3cba4a78f2d4b3854427fc47ee8ec8e9eaabf787da239c/pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c", size = 2108225, upload-time = "2025-04-23T18:33:04.51Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/c5/00c02d1571913d496aabf146106ad8239dc132485ee22efe08085084ff7c/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec", size = 2069490, upload-time = "2025-04-23T18:33:06.391Z" },
+ { url = "https://files.pythonhosted.org/packages/22/a8/dccc38768274d3ed3a59b5d06f59ccb845778687652daa71df0cab4040d7/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052", size = 2237525, upload-time = "2025-04-23T18:33:08.44Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/e7/4f98c0b125dda7cf7ccd14ba936218397b44f50a56dd8c16a3091df116c3/pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c", size = 2238446, upload-time = "2025-04-23T18:33:10.313Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/91/2ec36480fdb0b783cd9ef6795753c1dea13882f2e68e73bce76ae8c21e6a/pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808", size = 2066678, upload-time = "2025-04-23T18:33:12.224Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/27/d4ae6487d73948d6f20dddcd94be4ea43e74349b56eba82e9bdee2d7494c/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8", size = 2025200, upload-time = "2025-04-23T18:33:14.199Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/b8/b3cb95375f05d33801024079b9392a5ab45267a63400bf1866e7ce0f0de4/pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593", size = 1859123, upload-time = "2025-04-23T18:33:16.555Z" },
+ { url = "https://files.pythonhosted.org/packages/05/bc/0d0b5adeda59a261cd30a1235a445bf55c7e46ae44aea28f7bd6ed46e091/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612", size = 1892852, upload-time = "2025-04-23T18:33:18.513Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/11/d37bdebbda2e449cb3f519f6ce950927b56d62f0b84fd9cb9e372a26a3d5/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7", size = 2067484, upload-time = "2025-04-23T18:33:20.475Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/55/1f95f0a05ce72ecb02a8a8a1c3be0579bbc29b1d5ab68f1378b7bebc5057/pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e", size = 2108896, upload-time = "2025-04-23T18:33:22.501Z" },
+ { url = "https://files.pythonhosted.org/packages/53/89/2b2de6c81fa131f423246a9109d7b2a375e83968ad0800d6e57d0574629b/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8", size = 2069475, upload-time = "2025-04-23T18:33:24.528Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/e9/1f7efbe20d0b2b10f6718944b5d8ece9152390904f29a78e68d4e7961159/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf", size = 2239013, upload-time = "2025-04-23T18:33:26.621Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/b2/5309c905a93811524a49b4e031e9851a6b00ff0fb668794472ea7746b448/pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb", size = 2238715, upload-time = "2025-04-23T18:33:28.656Z" },
+ { url = "https://files.pythonhosted.org/packages/32/56/8a7ca5d2cd2cda1d245d34b1c9a942920a718082ae8e54e5f3e5a58b7add/pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1", size = 2066757, upload-time = "2025-04-23T18:33:30.645Z" },
+]
+
+[[package]]
+name = "pygments"
+version = "2.19.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
+]
+
+[[package]]
+name = "pymdown-extensions"
+version = "10.16.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markdown" },
+ { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/55/b3/6d2b3f149bc5413b0a29761c2c5832d8ce904a1d7f621e86616d96f505cc/pymdown_extensions-10.16.1.tar.gz", hash = "sha256:aace82bcccba3efc03e25d584e6a22d27a8e17caa3f4dd9f207e49b787aa9a91", size = 853277, upload-time = "2025-07-28T16:19:34.167Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e4/06/43084e6cbd4b3bc0e80f6be743b2e79fbc6eed8de9ad8c629939fa55d972/pymdown_extensions-10.16.1-py3-none-any.whl", hash = "sha256:d6ba157a6c03146a7fb122b2b9a121300056384eafeec9c9f9e584adfdb2a32d", size = 266178, upload-time = "2025-07-28T16:19:31.401Z" },
+]
+
+[[package]]
+name = "pyproject-api"
+version = "1.9.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "packaging" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/19/fd/437901c891f58a7b9096511750247535e891d2d5a5a6eefbc9386a2b41d5/pyproject_api-1.9.1.tar.gz", hash = "sha256:43c9918f49daab37e302038fc1aed54a8c7a91a9fa935d00b9a485f37e0f5335", size = 22710, upload-time = "2025-05-12T14:41:58.025Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ef/e6/c293c06695d4a3ab0260ef124a74ebadba5f4c511ce3a4259e976902c00b/pyproject_api-1.9.1-py3-none-any.whl", hash = "sha256:7d6238d92f8962773dd75b5f0c4a6a27cce092a14b623b811dba656f3b628948", size = 13158, upload-time = "2025-05-12T14:41:56.217Z" },
+]
+
+[[package]]
+name = "pyreadline3"
+version = "3.5.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/0f/49/4cea918a08f02817aabae639e3d0ac046fef9f9180518a3ad394e22da148/pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7", size = 99839, upload-time = "2024-09-19T02:40:10.062Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/5a/dc/491b7661614ab97483abf2056be1deee4dc2490ecbf7bff9ab5cdbac86e1/pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6", size = 83178, upload-time = "2024-09-19T02:40:08.598Z" },
+]
+
+[[package]]
+name = "pytest"
+version = "8.4.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "iniconfig" },
+ { name = "packaging" },
+ { name = "pluggy" },
+ { name = "pygments" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" },
+]
+
+[[package]]
+name = "pytest-asyncio"
+version = "1.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "backports-asyncio-runner", marker = "python_full_version < '3.11'" },
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" },
+]
+
+[[package]]
+name = "pytest-benchmark"
+version = "5.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "py-cpuinfo" },
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/39/d0/a8bd08d641b393db3be3819b03e2d9bb8760ca8479080a26a5f6e540e99c/pytest-benchmark-5.1.0.tar.gz", hash = "sha256:9ea661cdc292e8231f7cd4c10b0319e56a2118e2c09d9f50e1b3d150d2aca105", size = 337810, upload-time = "2024-10-30T11:51:48.521Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9e/d6/b41653199ea09d5969d4e385df9bbfd9a100f28ca7e824ce7c0a016e3053/pytest_benchmark-5.1.0-py3-none-any.whl", hash = "sha256:922de2dfa3033c227c96da942d1878191afa135a29485fb942e85dff1c592c89", size = 44259, upload-time = "2024-10-30T11:51:45.94Z" },
+]
+
+[[package]]
+name = "pytest-cov"
+version = "6.2.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "coverage", extra = ["toml"] },
+ { name = "pluggy" },
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/18/99/668cade231f434aaa59bbfbf49469068d2ddd945000621d3d165d2e7dd7b/pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2", size = 69432, upload-time = "2025-06-12T10:47:47.684Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" },
+]
+
+[[package]]
+name = "pytest-html"
+version = "4.1.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "jinja2" },
+ { name = "pytest" },
+ { name = "pytest-metadata" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/bb/ab/4862dcb5a8a514bd87747e06b8d55483c0c9e987e1b66972336946e49b49/pytest_html-4.1.1.tar.gz", hash = "sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07", size = 150773, upload-time = "2023-11-07T15:44:28.975Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c8/c7/c160021cbecd956cc1a6f79e5fe155f7868b2e5b848f1320dad0b3e3122f/pytest_html-4.1.1-py3-none-any.whl", hash = "sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71", size = 23491, upload-time = "2023-11-07T15:44:27.149Z" },
+]
+
+[[package]]
+name = "pytest-json-report"
+version = "1.5.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pytest" },
+ { name = "pytest-metadata" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4f/d3/765dae9712fcd68d820338908c1337e077d5fdadccd5cacf95b9b0bea278/pytest-json-report-1.5.0.tar.gz", hash = "sha256:2dde3c647851a19b5f3700729e8310a6e66efb2077d674f27ddea3d34dc615de", size = 21241, upload-time = "2022-03-15T21:03:10.2Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/81/35/d07400c715bf8a88aa0c1ee9c9eb6050ca7fe5b39981f0eea773feeb0681/pytest_json_report-1.5.0-py3-none-any.whl", hash = "sha256:9897b68c910b12a2e48dd849f9a284b2c79a732a8a9cb398452ddd23d3c8c325", size = 13222, upload-time = "2022-03-15T21:03:08.65Z" },
+]
+
+[[package]]
+name = "pytest-metadata"
+version = "3.1.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a6/85/8c969f8bec4e559f8f2b958a15229a35495f5b4ce499f6b865eac54b878d/pytest_metadata-3.1.1.tar.gz", hash = "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8", size = 9952, upload-time = "2024-02-12T19:38:44.887Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3e/43/7e7b2ec865caa92f67b8f0e9231a798d102724ca4c0e1f414316be1c1ef2/pytest_metadata-3.1.1-py3-none-any.whl", hash = "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b", size = 11428, upload-time = "2024-02-12T19:38:42.531Z" },
+]
+
+[[package]]
+name = "pytest-mock"
+version = "3.14.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" },
+]
+
+[[package]]
+name = "pytest-timeout"
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ac/82/4c9ecabab13363e72d880f2fb504c5f750433b2b6f16e99f4ec21ada284c/pytest_timeout-2.4.0.tar.gz", hash = "sha256:7e68e90b01f9eff71332b25001f85c75495fc4e3a836701876183c4bcfd0540a", size = 17973, upload-time = "2025-05-05T19:44:34.99Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" },
+]
+
+[[package]]
+name = "pytest-xdist"
+version = "3.8.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "execnet" },
+ { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" },
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.9.0.post0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "six" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" },
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/9b/95/a3fac87cb7158e231b5a6012e438c647e1a87f09f8e0d123acec8ab8bf71/PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", size = 184199, upload-time = "2024-08-06T20:31:40.178Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/7a/68bd47624dab8fd4afbfd3c48e3b79efe09098ae941de5b58abcbadff5cb/PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", size = 171758, upload-time = "2024-08-06T20:31:42.173Z" },
+ { url = "https://files.pythonhosted.org/packages/49/ee/14c54df452143b9ee9f0f29074d7ca5516a36edb0b4cc40c3f280131656f/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", size = 718463, upload-time = "2024-08-06T20:31:44.263Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/61/de363a97476e766574650d742205be468921a7b532aa2499fcd886b62530/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", size = 719280, upload-time = "2024-08-06T20:31:50.199Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/4e/1523cb902fd98355e2e9ea5e5eb237cbc5f3ad5f3075fa65087aa0ecb669/PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", size = 751239, upload-time = "2024-08-06T20:31:52.292Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/33/5504b3a9a4464893c32f118a9cc045190a91637b119a9c881da1cf6b7a72/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", size = 695802, upload-time = "2024-08-06T20:31:53.836Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/20/8347dcabd41ef3a3cdc4f7b7a2aff3d06598c8779faa189cdbf878b626a4/PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", size = 720527, upload-time = "2024-08-06T20:31:55.565Z" },
+ { url = "https://files.pythonhosted.org/packages/be/aa/5afe99233fb360d0ff37377145a949ae258aaab831bde4792b32650a4378/PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", size = 144052, upload-time = "2024-08-06T20:31:56.914Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/84/0fa4b06f6d6c958d207620fc60005e241ecedceee58931bb20138e1e5776/PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", size = 161774, upload-time = "2024-08-06T20:31:58.304Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/aa/7af4e81f7acba21a4c6be026da38fd2b872ca46226673c89a758ebdc4fd2/PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", size = 184612, upload-time = "2024-08-06T20:32:03.408Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", size = 172040, upload-time = "2024-08-06T20:32:04.926Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/0c/c804f5f922a9a6563bab712d8dcc70251e8af811fce4524d57c2c0fd49a4/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", size = 736829, upload-time = "2024-08-06T20:32:06.459Z" },
+ { url = "https://files.pythonhosted.org/packages/51/16/6af8d6a6b210c8e54f1406a6b9481febf9c64a3109c541567e35a49aa2e7/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", size = 764167, upload-time = "2024-08-06T20:32:08.338Z" },
+ { url = "https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", size = 762952, upload-time = "2024-08-06T20:32:14.124Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/97/ecc1abf4a823f5ac61941a9c00fe501b02ac3ab0e373c3857f7d4b83e2b6/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4", size = 735301, upload-time = "2024-08-06T20:32:16.17Z" },
+ { url = "https://files.pythonhosted.org/packages/45/73/0f49dacd6e82c9430e46f4a027baa4ca205e8b0a9dce1397f44edc23559d/PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", size = 756638, upload-time = "2024-08-06T20:32:18.555Z" },
+ { url = "https://files.pythonhosted.org/packages/22/5f/956f0f9fc65223a58fbc14459bf34b4cc48dec52e00535c79b8db361aabd/PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", size = 143850, upload-time = "2024-08-06T20:32:19.889Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/23/8da0bbe2ab9dcdd11f4f4557ccaf95c10b9811b13ecced089d43ce59c3c8/PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", size = 161980, upload-time = "2024-08-06T20:32:21.273Z" },
+ { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" },
+ { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" },
+ { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" },
+ { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" },
+ { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" },
+ { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" },
+]
+
+[[package]]
+name = "pyyaml-env-tag"
+version = "1.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pyyaml" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/eb/2e/79c822141bfd05a853236b504869ebc6b70159afc570e1d5a20641782eaa/pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff", size = 5737, upload-time = "2025-05-13T15:24:01.64Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/11/432f32f8097b03e3cd5fe57e88efb685d964e2e5178a48ed61e841f7fdce/pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04", size = 4722, upload-time = "2025-05-13T15:23:59.629Z" },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "charset-normalizer" },
+ { name = "idna" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" },
+]
+
+[[package]]
+name = "rich"
+version = "14.1.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "markdown-it-py" },
+ { name = "pygments" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fe/75/af448d8e52bf1d8fa6a9d089ca6c07ff4453d86c65c145d0a300bb073b9b/rich-14.1.0.tar.gz", hash = "sha256:e497a48b844b0320d45007cdebfeaeed8db2a4f4bcf49f15e455cfc4af11eaa8", size = 224441, upload-time = "2025-07-25T07:32:58.125Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e3/30/3c4d035596d3cf444529e0b2953ad0466f6049528a879d27534700580395/rich-14.1.0-py3-none-any.whl", hash = "sha256:536f5f1785986d6dbdea3c75205c473f970777b4a0d6c6dd1b696aa05a3fa04f", size = 243368, upload-time = "2025-07-25T07:32:56.73Z" },
+]
+
+[[package]]
+name = "ruamel-yaml"
+version = "0.18.14"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "ruamel-yaml-clib", marker = "python_full_version < '3.14' and platform_python_implementation == 'CPython'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/39/87/6da0df742a4684263261c253f00edd5829e6aca970fff69e75028cccc547/ruamel.yaml-0.18.14.tar.gz", hash = "sha256:7227b76aaec364df15936730efbf7d72b30c0b79b1d578bbb8e3dcb2d81f52b7", size = 145511, upload-time = "2025-06-09T08:51:09.828Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/af/6d/6fe4805235e193aad4aaf979160dd1f3c487c57d48b810c816e6e842171b/ruamel.yaml-0.18.14-py3-none-any.whl", hash = "sha256:710ff198bb53da66718c7db27eec4fbcc9aa6ca7204e4c1df2f282b6fe5eb6b2", size = 118570, upload-time = "2025-06-09T08:51:06.348Z" },
+]
+
+[[package]]
+name = "ruamel-yaml-clib"
+version = "0.2.12"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/20/84/80203abff8ea4993a87d823a5f632e4d92831ef75d404c9fc78d0176d2b5/ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f", size = 225315, upload-time = "2024-10-20T10:10:56.22Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/70/57/40a958e863e299f0c74ef32a3bde9f2d1ea8d69669368c0c502a0997f57f/ruamel.yaml.clib-0.2.12-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:11f891336688faf5156a36293a9c362bdc7c88f03a8a027c2c1d8e0bcde998e5", size = 131301, upload-time = "2024-10-20T10:12:35.876Z" },
+ { url = "https://files.pythonhosted.org/packages/98/a8/29a3eb437b12b95f50a6bcc3d7d7214301c6c529d8fdc227247fa84162b5/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:a606ef75a60ecf3d924613892cc603b154178ee25abb3055db5062da811fd969", size = 633728, upload-time = "2024-10-20T10:12:37.858Z" },
+ { url = "https://files.pythonhosted.org/packages/35/6d/ae05a87a3ad540259c3ad88d71275cbd1c0f2d30ae04c65dcbfb6dcd4b9f/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd5415dded15c3822597455bc02bcd66e81ef8b7a48cb71a33628fc9fdde39df", size = 722230, upload-time = "2024-10-20T10:12:39.457Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/b7/20c6f3c0b656fe609675d69bc135c03aac9e3865912444be6339207b6648/ruamel.yaml.clib-0.2.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f66efbc1caa63c088dead1c4170d148eabc9b80d95fb75b6c92ac0aad2437d76", size = 686712, upload-time = "2024-10-20T10:12:41.119Z" },
+ { url = "https://files.pythonhosted.org/packages/cd/11/d12dbf683471f888d354dac59593873c2b45feb193c5e3e0f2ebf85e68b9/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:22353049ba4181685023b25b5b51a574bce33e7f51c759371a7422dcae5402a6", size = 663936, upload-time = "2024-10-21T11:26:37.419Z" },
+ { url = "https://files.pythonhosted.org/packages/72/14/4c268f5077db5c83f743ee1daeb236269fa8577133a5cfa49f8b382baf13/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:932205970b9f9991b34f55136be327501903f7c66830e9760a8ffb15b07f05cd", size = 696580, upload-time = "2024-10-21T11:26:39.503Z" },
+ { url = "https://files.pythonhosted.org/packages/30/fc/8cd12f189c6405a4c1cf37bd633aa740a9538c8e40497c231072d0fef5cf/ruamel.yaml.clib-0.2.12-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a52d48f4e7bf9005e8f0a89209bf9a73f7190ddf0489eee5eb51377385f59f2a", size = 663393, upload-time = "2024-12-11T19:58:13.873Z" },
+ { url = "https://files.pythonhosted.org/packages/80/29/c0a017b704aaf3cbf704989785cd9c5d5b8ccec2dae6ac0c53833c84e677/ruamel.yaml.clib-0.2.12-cp310-cp310-win32.whl", hash = "sha256:3eac5a91891ceb88138c113f9db04f3cebdae277f5d44eaa3651a4f573e6a5da", size = 100326, upload-time = "2024-10-20T10:12:42.967Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/65/fa39d74db4e2d0cd252355732d966a460a41cd01c6353b820a0952432839/ruamel.yaml.clib-0.2.12-cp310-cp310-win_amd64.whl", hash = "sha256:ab007f2f5a87bd08ab1499bdf96f3d5c6ad4dcfa364884cb4549aa0154b13a28", size = 118079, upload-time = "2024-10-20T10:12:44.117Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/8f/683c6ad562f558cbc4f7c029abcd9599148c51c54b5ef0f24f2638da9fbb/ruamel.yaml.clib-0.2.12-cp311-cp311-macosx_13_0_arm64.whl", hash = "sha256:4a6679521a58256a90b0d89e03992c15144c5f3858f40d7c18886023d7943db6", size = 132224, upload-time = "2024-10-20T10:12:45.162Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/d2/b79b7d695e2f21da020bd44c782490578f300dd44f0a4c57a92575758a76/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:d84318609196d6bd6da0edfa25cedfbabd8dbde5140a0a23af29ad4b8f91fb1e", size = 641480, upload-time = "2024-10-20T10:12:46.758Z" },
+ { url = "https://files.pythonhosted.org/packages/68/6e/264c50ce2a31473a9fdbf4fa66ca9b2b17c7455b31ef585462343818bd6c/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bb43a269eb827806502c7c8efb7ae7e9e9d0573257a46e8e952f4d4caba4f31e", size = 739068, upload-time = "2024-10-20T10:12:48.605Z" },
+ { url = "https://files.pythonhosted.org/packages/86/29/88c2567bc893c84d88b4c48027367c3562ae69121d568e8a3f3a8d363f4d/ruamel.yaml.clib-0.2.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:811ea1594b8a0fb466172c384267a4e5e367298af6b228931f273b111f17ef52", size = 703012, upload-time = "2024-10-20T10:12:51.124Z" },
+ { url = "https://files.pythonhosted.org/packages/11/46/879763c619b5470820f0cd6ca97d134771e502776bc2b844d2adb6e37753/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cf12567a7b565cbf65d438dec6cfbe2917d3c1bdddfce84a9930b7d35ea59642", size = 704352, upload-time = "2024-10-21T11:26:41.438Z" },
+ { url = "https://files.pythonhosted.org/packages/02/80/ece7e6034256a4186bbe50dee28cd032d816974941a6abf6a9d65e4228a7/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7dd5adc8b930b12c8fc5b99e2d535a09889941aa0d0bd06f4749e9a9397c71d2", size = 737344, upload-time = "2024-10-21T11:26:43.62Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/ca/e4106ac7e80efbabdf4bf91d3d32fc424e41418458251712f5672eada9ce/ruamel.yaml.clib-0.2.12-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1492a6051dab8d912fc2adeef0e8c72216b24d57bd896ea607cb90bb0c4981d3", size = 714498, upload-time = "2024-12-11T19:58:15.592Z" },
+ { url = "https://files.pythonhosted.org/packages/67/58/b1f60a1d591b771298ffa0428237afb092c7f29ae23bad93420b1eb10703/ruamel.yaml.clib-0.2.12-cp311-cp311-win32.whl", hash = "sha256:bd0a08f0bab19093c54e18a14a10b4322e1eacc5217056f3c063bd2f59853ce4", size = 100205, upload-time = "2024-10-20T10:12:52.865Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/4f/b52f634c9548a9291a70dfce26ca7ebce388235c93588a1068028ea23fcc/ruamel.yaml.clib-0.2.12-cp311-cp311-win_amd64.whl", hash = "sha256:a274fb2cb086c7a3dea4322ec27f4cb5cc4b6298adb583ab0e211a4682f241eb", size = 118185, upload-time = "2024-10-20T10:12:54.652Z" },
+ { url = "https://files.pythonhosted.org/packages/48/41/e7a405afbdc26af961678474a55373e1b323605a4f5e2ddd4a80ea80f628/ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632", size = 133433, upload-time = "2024-10-20T10:12:55.657Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/b0/b850385604334c2ce90e3ee1013bd911aedf058a934905863a6ea95e9eb4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d", size = 647362, upload-time = "2024-10-20T10:12:57.155Z" },
+ { url = "https://files.pythonhosted.org/packages/44/d0/3f68a86e006448fb6c005aee66565b9eb89014a70c491d70c08de597f8e4/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c", size = 754118, upload-time = "2024-10-20T10:12:58.501Z" },
+ { url = "https://files.pythonhosted.org/packages/52/a9/d39f3c5ada0a3bb2870d7db41901125dbe2434fa4f12ca8c5b83a42d7c53/ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd", size = 706497, upload-time = "2024-10-20T10:13:00.211Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/fa/097e38135dadd9ac25aecf2a54be17ddf6e4c23e43d538492a90ab3d71c6/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31", size = 698042, upload-time = "2024-10-21T11:26:46.038Z" },
+ { url = "https://files.pythonhosted.org/packages/ec/d5/a659ca6f503b9379b930f13bc6b130c9f176469b73b9834296822a83a132/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680", size = 745831, upload-time = "2024-10-21T11:26:47.487Z" },
+ { url = "https://files.pythonhosted.org/packages/db/5d/36619b61ffa2429eeaefaab4f3374666adf36ad8ac6330d855848d7d36fd/ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d", size = 715692, upload-time = "2024-12-11T19:58:17.252Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/82/85cb92f15a4231c89b95dfe08b09eb6adca929ef7df7e17ab59902b6f589/ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5", size = 98777, upload-time = "2024-10-20T10:13:01.395Z" },
+ { url = "https://files.pythonhosted.org/packages/d7/8f/c3654f6f1ddb75daf3922c3d8fc6005b1ab56671ad56ffb874d908bfa668/ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4", size = 115523, upload-time = "2024-10-20T10:13:02.768Z" },
+ { url = "https://files.pythonhosted.org/packages/29/00/4864119668d71a5fa45678f380b5923ff410701565821925c69780356ffa/ruamel.yaml.clib-0.2.12-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:4c8c5d82f50bb53986a5e02d1b3092b03622c02c2eb78e29bec33fd9593bae1a", size = 132011, upload-time = "2024-10-20T10:13:04.377Z" },
+ { url = "https://files.pythonhosted.org/packages/7f/5e/212f473a93ae78c669ffa0cb051e3fee1139cb2d385d2ae1653d64281507/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux2014_aarch64.whl", hash = "sha256:e7e3736715fbf53e9be2a79eb4db68e4ed857017344d697e8b9749444ae57475", size = 642488, upload-time = "2024-10-20T10:13:05.906Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/8f/ecfbe2123ade605c49ef769788f79c38ddb1c8fa81e01f4dbf5cf1a44b16/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7e75b4965e1d4690e93021adfcecccbca7d61c7bddd8e22406ef2ff20d74ef", size = 745066, upload-time = "2024-10-20T10:13:07.26Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/a9/28f60726d29dfc01b8decdb385de4ced2ced9faeb37a847bd5cf26836815/ruamel.yaml.clib-0.2.12-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96777d473c05ee3e5e3c3e999f5d23c6f4ec5b0c38c098b3a5229085f74236c6", size = 701785, upload-time = "2024-10-20T10:13:08.504Z" },
+ { url = "https://files.pythonhosted.org/packages/84/7e/8e7ec45920daa7f76046578e4f677a3215fe8f18ee30a9cb7627a19d9b4c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:3bc2a80e6420ca8b7d3590791e2dfc709c88ab9152c00eeb511c9875ce5778bf", size = 693017, upload-time = "2024-10-21T11:26:48.866Z" },
+ { url = "https://files.pythonhosted.org/packages/c5/b3/d650eaade4ca225f02a648321e1ab835b9d361c60d51150bac49063b83fa/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e188d2699864c11c36cdfdada94d781fd5d6b0071cd9c427bceb08ad3d7c70e1", size = 741270, upload-time = "2024-10-21T11:26:50.213Z" },
+ { url = "https://files.pythonhosted.org/packages/87/b8/01c29b924dcbbed75cc45b30c30d565d763b9c4d540545a0eeecffb8f09c/ruamel.yaml.clib-0.2.12-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4f6f3eac23941b32afccc23081e1f50612bdbe4e982012ef4f5797986828cd01", size = 709059, upload-time = "2024-12-11T19:58:18.846Z" },
+ { url = "https://files.pythonhosted.org/packages/30/8c/ed73f047a73638257aa9377ad356bea4d96125b305c34a28766f4445cc0f/ruamel.yaml.clib-0.2.12-cp313-cp313-win32.whl", hash = "sha256:6442cb36270b3afb1b4951f060eccca1ce49f3d087ca1ca4563a6eb479cb3de6", size = 98583, upload-time = "2024-10-20T10:13:09.658Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/85/e8e751d8791564dd333d5d9a4eab0a7a115f7e349595417fd50ecae3395c/ruamel.yaml.clib-0.2.12-cp313-cp313-win_amd64.whl", hash = "sha256:e5b8daf27af0b90da7bb903a876477a9e6d7270be6146906b276605997c7e9a3", size = 115190, upload-time = "2024-10-20T10:13:10.66Z" },
+]
+
+[[package]]
+name = "ruff"
+version = "0.12.8"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/4b/da/5bd7565be729e86e1442dad2c9a364ceeff82227c2dece7c29697a9795eb/ruff-0.12.8.tar.gz", hash = "sha256:4cb3a45525176e1009b2b64126acf5f9444ea59066262791febf55e40493a033", size = 5242373, upload-time = "2025-08-07T19:05:47.268Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c9/1e/c843bfa8ad1114fab3eb2b78235dda76acd66384c663a4e0415ecc13aa1e/ruff-0.12.8-py3-none-linux_armv6l.whl", hash = "sha256:63cb5a5e933fc913e5823a0dfdc3c99add73f52d139d6cd5cc8639d0e0465513", size = 11675315, upload-time = "2025-08-07T19:05:06.15Z" },
+ { url = "https://files.pythonhosted.org/packages/24/ee/af6e5c2a8ca3a81676d5480a1025494fd104b8896266502bb4de2a0e8388/ruff-0.12.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9a9bbe28f9f551accf84a24c366c1aa8774d6748438b47174f8e8565ab9dedbc", size = 12456653, upload-time = "2025-08-07T19:05:09.759Z" },
+ { url = "https://files.pythonhosted.org/packages/99/9d/e91f84dfe3866fa648c10512904991ecc326fd0b66578b324ee6ecb8f725/ruff-0.12.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2fae54e752a3150f7ee0e09bce2e133caf10ce9d971510a9b925392dc98d2fec", size = 11659690, upload-time = "2025-08-07T19:05:12.551Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/ac/a363d25ec53040408ebdd4efcee929d48547665858ede0505d1d8041b2e5/ruff-0.12.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c0acbcf01206df963d9331b5838fb31f3b44fa979ee7fa368b9b9057d89f4a53", size = 11896923, upload-time = "2025-08-07T19:05:14.821Z" },
+ { url = "https://files.pythonhosted.org/packages/58/9f/ea356cd87c395f6ade9bb81365bd909ff60860975ca1bc39f0e59de3da37/ruff-0.12.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ae3e7504666ad4c62f9ac8eedb52a93f9ebdeb34742b8b71cd3cccd24912719f", size = 11477612, upload-time = "2025-08-07T19:05:16.712Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/46/92e8fa3c9dcfd49175225c09053916cb97bb7204f9f899c2f2baca69e450/ruff-0.12.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb82efb5d35d07497813a1c5647867390a7d83304562607f3579602fa3d7d46f", size = 13182745, upload-time = "2025-08-07T19:05:18.709Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/c4/f2176a310f26e6160deaf661ef60db6c3bb62b7a35e57ae28f27a09a7d63/ruff-0.12.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:dbea798fc0065ad0b84a2947b0aff4233f0cb30f226f00a2c5850ca4393de609", size = 14206885, upload-time = "2025-08-07T19:05:21.025Z" },
+ { url = "https://files.pythonhosted.org/packages/87/9d/98e162f3eeeb6689acbedbae5050b4b3220754554526c50c292b611d3a63/ruff-0.12.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:49ebcaccc2bdad86fd51b7864e3d808aad404aab8df33d469b6e65584656263a", size = 13639381, upload-time = "2025-08-07T19:05:23.423Z" },
+ { url = "https://files.pythonhosted.org/packages/81/4e/1b7478b072fcde5161b48f64774d6edd59d6d198e4ba8918d9f4702b8043/ruff-0.12.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ac9c570634b98c71c88cb17badd90f13fc076a472ba6ef1d113d8ed3df109fb", size = 12613271, upload-time = "2025-08-07T19:05:25.507Z" },
+ { url = "https://files.pythonhosted.org/packages/e8/67/0c3c9179a3ad19791ef1b8f7138aa27d4578c78700551c60d9260b2c660d/ruff-0.12.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:560e0cd641e45591a3e42cb50ef61ce07162b9c233786663fdce2d8557d99818", size = 12847783, upload-time = "2025-08-07T19:05:28.14Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/2a/0b6ac3dd045acf8aa229b12c9c17bb35508191b71a14904baf99573a21bd/ruff-0.12.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:71c83121512e7743fba5a8848c261dcc454cafb3ef2934a43f1b7a4eb5a447ea", size = 11702672, upload-time = "2025-08-07T19:05:30.413Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/ee/f9fdc9f341b0430110de8b39a6ee5fa68c5706dc7c0aa940817947d6937e/ruff-0.12.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:de4429ef2ba091ecddedd300f4c3f24bca875d3d8b23340728c3cb0da81072c3", size = 11440626, upload-time = "2025-08-07T19:05:32.492Z" },
+ { url = "https://files.pythonhosted.org/packages/89/fb/b3aa2d482d05f44e4d197d1de5e3863feb13067b22c571b9561085c999dc/ruff-0.12.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:a2cab5f60d5b65b50fba39a8950c8746df1627d54ba1197f970763917184b161", size = 12462162, upload-time = "2025-08-07T19:05:34.449Z" },
+ { url = "https://files.pythonhosted.org/packages/18/9f/5c5d93e1d00d854d5013c96e1a92c33b703a0332707a7cdbd0a4880a84fb/ruff-0.12.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:45c32487e14f60b88aad6be9fd5da5093dbefb0e3e1224131cb1d441d7cb7d46", size = 12913212, upload-time = "2025-08-07T19:05:36.541Z" },
+ { url = "https://files.pythonhosted.org/packages/71/13/ab9120add1c0e4604c71bfc2e4ef7d63bebece0cfe617013da289539cef8/ruff-0.12.8-py3-none-win32.whl", hash = "sha256:daf3475060a617fd5bc80638aeaf2f5937f10af3ec44464e280a9d2218e720d3", size = 11694382, upload-time = "2025-08-07T19:05:38.468Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/dc/a2873b7c5001c62f46266685863bee2888caf469d1edac84bf3242074be2/ruff-0.12.8-py3-none-win_amd64.whl", hash = "sha256:7209531f1a1fcfbe8e46bcd7ab30e2f43604d8ba1c49029bb420b103d0b5f76e", size = 12740482, upload-time = "2025-08-07T19:05:40.391Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/5c/799a1efb8b5abab56e8a9f2a0b72d12bd64bb55815e9476c7d0a2887d2f7/ruff-0.12.8-py3-none-win_arm64.whl", hash = "sha256:c90e1a334683ce41b0e7a04f41790c429bf5073b62c1ae701c9dc5b3d14f0749", size = 11884718, upload-time = "2025-08-07T19:05:42.866Z" },
+]
+
+[[package]]
+name = "safety"
+version = "3.2.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "authlib" },
+ { name = "click" },
+ { name = "dparse" },
+ { name = "filelock" },
+ { name = "jinja2" },
+ { name = "marshmallow" },
+ { name = "packaging" },
+ { name = "pydantic" },
+ { name = "requests" },
+ { name = "rich" },
+ { name = "ruamel-yaml" },
+ { name = "safety-schemas" },
+ { name = "setuptools" },
+ { name = "typer" },
+ { name = "typing-extensions" },
+ { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/af/bb/723f294df65939d61cd35cba6c9c6c95bd2ce7f3822a45ba9e836cf034e3/safety-3.2.4.tar.gz", hash = "sha256:bac0202016d736a2118057964a0e3983fa20ff2563fd103cac3f3ac1ed3fea11", size = 179364, upload-time = "2024-07-04T15:08:29.437Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/59/6c/bf6fcfbf1daf5add740cd7f276e6c5f6a383e10f12f08c47bc321a076e4d/safety-3.2.4-py3-none-any.whl", hash = "sha256:242ff7ae448d7fb2ea455c90f44e3f2ca45be9c8559b2fe9dfc89617164a0f17", size = 147009, upload-time = "2024-07-04T15:08:27.412Z" },
+]
+
+[[package]]
+name = "safety-schemas"
+version = "0.0.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "dparse" },
+ { name = "packaging" },
+ { name = "pydantic" },
+ { name = "ruamel-yaml" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4d/a9/b29e8e03c75588b3680e63e3b3b33a6e2e4d654efdc78cfca5d12608ec0b/safety_schemas-0.0.5.tar.gz", hash = "sha256:0de5fc9a53d4423644a8ce9a17a2e474714aa27e57f3506146e95a41710ff104", size = 38716, upload-time = "2024-08-23T14:27:28.974Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/35/da/3386abf19e2ed3ecf783b253a8a8d5eff0c2ef9e5f5f875e51c90a9521eb/safety_schemas-0.0.5-py3-none-any.whl", hash = "sha256:6ac9eb71e60f0d4e944597c01dd48d6d8cd3d467c94da4aba3702a05a3a6ab4f", size = 27027, upload-time = "2024-08-23T14:27:30.262Z" },
+]
+
+[[package]]
+name = "setuptools"
+version = "80.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" },
+]
+
+[[package]]
+name = "shellingham"
+version = "1.5.4"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
+]
+
+[[package]]
+name = "six"
+version = "1.17.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
+]
+
+[[package]]
+name = "starlette"
+version = "0.47.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = "2025-07-20T17:31:58.522Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" },
+]
+
+[[package]]
+name = "stevedore"
+version = "5.4.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "pbr" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/28/3f/13cacea96900bbd31bb05c6b74135f85d15564fc583802be56976c940470/stevedore-5.4.1.tar.gz", hash = "sha256:3135b5ae50fe12816ef291baff420acb727fcd356106e3e9cbfa9e5985cd6f4b", size = 513858, upload-time = "2025-02-20T14:03:57.285Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f7/45/8c4ebc0c460e6ec38e62ab245ad3c7fc10b210116cea7c16d61602aa9558/stevedore-5.4.1-py3-none-any.whl", hash = "sha256:d10a31c7b86cba16c1f6e8d15416955fc797052351a56af15e608ad20811fcfe", size = 49533, upload-time = "2025-02-20T14:03:55.849Z" },
+]
+
+[[package]]
+name = "sympy"
+version = "1.14.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "mpmath" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/83/d3/803453b36afefb7c2bb238361cd4ae6125a569b4db67cd9e79846ba2d68c/sympy-1.14.0.tar.gz", hash = "sha256:d3d3fe8df1e5a0b42f0e7bdf50541697dbe7d23746e894990c030e2b05e72517", size = 7793921, upload-time = "2025-04-27T18:05:01.611Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a2/09/77d55d46fd61b4a135c444fc97158ef34a095e5681d0a6c10b75bf356191/sympy-1.14.0-py3-none-any.whl", hash = "sha256:e091cc3e99d2141a0ba2847328f5479b05d94a6635cb96148ccb3f34671bd8f5", size = 6299353, upload-time = "2025-04-27T18:04:59.103Z" },
+]
+
+[[package]]
+name = "tensorrt"
+version = "10.9.0.34"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "tensorrt-cu12", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+sdist = { url = "https://pypi.nvidia.com/tensorrt/tensorrt-10.9.0.34.tar.gz", hash = "sha256:c68f6ca5ebd017bf1ebf40c8c92e3be619326882c738b8597d20720cf0376d09" }
+
+[[package]]
+name = "tensorrt"
+version = "10.12.0.36"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "tensorrt-cu12", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+sdist = { url = "https://pypi.nvidia.com/tensorrt/tensorrt-10.12.0.36.tar.gz", hash = "sha256:b246a830c26713e097b73151917e101cfb81aa0e7274c3c3b4c1f9f8b886be2e" }
+
+[[package]]
+name = "tensorrt-cu12"
+version = "10.9.0.34"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "tensorrt-cu12-bindings", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt-cu12-libs", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+sdist = { url = "https://pypi.nvidia.com/tensorrt-cu12/tensorrt_cu12-10.9.0.34.tar.gz", hash = "sha256:4b0472164c2e0f2956f3f9dd0b847d3c11ca3138bbb6f53d788da0f84a374182" }
+
+[[package]]
+name = "tensorrt-cu12"
+version = "10.12.0.36"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "tensorrt-cu12-bindings", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "tensorrt-cu12-libs", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+sdist = { url = "https://pypi.nvidia.com/tensorrt-cu12/tensorrt_cu12-10.12.0.36.tar.gz", hash = "sha256:aedeee0195c042592ac6b0536b19bc8cdbb1a548f35e09d24fbe78e1c76217c5" }
+
+[[package]]
+name = "tensorrt-cu12-bindings"
+version = "10.9.0.34"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.9.0.34-cp310-none-manylinux_2_31_aarch64.whl", hash = "sha256:2f1d763a4661e562aa102bf74540279d4f80cbc0464de6922779667b3451aaf9" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.9.0.34-cp311-none-manylinux_2_31_aarch64.whl", hash = "sha256:523ffb08efb5d0939afabc9438715d679a4149ed25ca7728a1864dc6a6f1f8e0" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.9.0.34-cp312-none-manylinux_2_31_aarch64.whl", hash = "sha256:2455766e8d76b2f43338fc33e9dce92a847a1b86ce19cc6acc258ff71e834a93" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.9.0.34-cp313-none-manylinux_2_31_aarch64.whl", hash = "sha256:539018b1c9e5e39c7e61ba1f9dc21c1e45d82875743ac0839e229e88c3c4c4dc" },
+]
+
+[[package]]
+name = "tensorrt-cu12-bindings"
+version = "10.12.0.36"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp310-none-manylinux_2_28_x86_64.whl", hash = "sha256:7ecdb6fc2555caed7d4fbbd8158ed7ced64e230c125484f62a5369c40dcc70e5" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp310-none-manylinux_2_31_aarch64.whl", hash = "sha256:d8548ab5976ca5c91279c68ee77f4c892e03460709cfa3fbd2a22aa8123cb731" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp310-none-win_amd64.whl", hash = "sha256:71be162a77ec9d5165d8c6ffad95179882e2c8604a68d7e21933258c182a8c0a" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp311-none-manylinux_2_28_x86_64.whl", hash = "sha256:58cf45605bb330e86f8ad49bc8997ed68cfdf5b09da229534fb7f84aa3fe5bf4" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp311-none-manylinux_2_31_aarch64.whl", hash = "sha256:ae0866a89caaeada1c16776de85413a523f78f53b1fd83f1b903c39eed264d82" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp311-none-win_amd64.whl", hash = "sha256:9d6687f056ce603c6a97235fd4291330c06410ac5b899db412597a06720b02bc" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp312-none-manylinux_2_28_x86_64.whl", hash = "sha256:fb3a2ce96c7472a46bbee2030ce6a54fd6a32deda401c1c67d9de057550e0171" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp312-none-manylinux_2_31_aarch64.whl", hash = "sha256:f5128b8b2a379e65c09745ba97df58abf3a418cbfd6508d37f76121d9bdd3bc8" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp312-none-win_amd64.whl", hash = "sha256:408af91113c22f58f2f08404bfc1548baa8d78ce6126bd543acdc9d5819662ff" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp313-none-manylinux_2_28_x86_64.whl", hash = "sha256:0eb8d3e41279b1d0d329b85372d5d720c8d2ff1228f6273142d717b44d75935b" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp313-none-manylinux_2_31_aarch64.whl", hash = "sha256:a850992cad842340e6fed41fe74f529064064ff61881d50ef5a2be1816526f9b" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-bindings/tensorrt_cu12_bindings-10.12.0.36-cp313-none-win_amd64.whl", hash = "sha256:3a1709e9dbaaef0f005681637a7d698f8609fec250716bbd184e3f3c40d8c4ea" },
+]
+
+[[package]]
+name = "tensorrt-cu12-libs"
+version = "10.9.0.34"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "nvidia-cuda-runtime-cu12", version = "12.9.79", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-libs/tensorrt_cu12_libs-10.9.0.34-py2.py3-none-manylinux_2_31_aarch64.whl", hash = "sha256:ffb9568e47266a6ac6f03326dc3f0e84dea6fb680d51bc2ca5e6c2bf3b5cca40" },
+]
+
+[[package]]
+name = "tensorrt-cu12-libs"
+version = "10.12.0.36"
+source = { registry = "https://pypi.nvidia.com/" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "nvidia-cuda-runtime-cu12", version = "12.8.90", source = { registry = "https://pypi.nvidia.com/" }, marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+ { name = "nvidia-cuda-runtime-cu12", version = "12.9.79", source = { registry = "https://pypi.nvidia.com/" }, marker = "(platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux') or (platform_machine != 'aarch64' and sys_platform == 'darwin') or ('tegra' not in platform_release and sys_platform == 'darwin')" },
+]
+wheels = [
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-libs/tensorrt_cu12_libs-10.12.0.36-py2.py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:3910039e1d49de0edfdc8bf273e40ad4b85a9d57c7c383fe0e22f75417df9610" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-libs/tensorrt_cu12_libs-10.12.0.36-py2.py3-none-manylinux_2_31_aarch64.whl", hash = "sha256:1c117effa7318b65508457e9a11e67941859c8e5c346b59fd0090f66be28f2f4" },
+ { url = "https://pypi.nvidia.com/tensorrt-cu12-libs/tensorrt_cu12_libs-10.12.0.36-py2.py3-none-win_amd64.whl", hash = "sha256:d0e22a1fc312486ab980c2b19ccaadb74123829a51a56c69f6dd2b6d356a74e9" },
+]
+
+[[package]]
+name = "tomli"
+version = "2.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/18/87/302344fed471e44a87289cf4967697d07e532f2421fdaf868a303cbae4ff/tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff", size = 17175, upload-time = "2024-11-27T22:38:36.873Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/ca/75707e6efa2b37c77dadb324ae7d9571cb424e61ea73fad7c56c2d14527f/tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249", size = 131077, upload-time = "2024-11-27T22:37:54.956Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/16/51ae563a8615d472fdbffc43a3f3d46588c264ac4f024f63f01283becfbb/tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6", size = 123429, upload-time = "2024-11-27T22:37:56.698Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/dd/4f6cd1e7b160041db83c694abc78e100473c15d54620083dbd5aae7b990e/tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a", size = 226067, upload-time = "2024-11-27T22:37:57.63Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/6b/c54ede5dc70d648cc6361eaf429304b02f2871a345bbdd51e993d6cdf550/tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee", size = 236030, upload-time = "2024-11-27T22:37:59.344Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/47/999514fa49cfaf7a92c805a86c3c43f4215621855d151b61c602abb38091/tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e", size = 240898, upload-time = "2024-11-27T22:38:00.429Z" },
+ { url = "https://files.pythonhosted.org/packages/73/41/0a01279a7ae09ee1573b423318e7934674ce06eb33f50936655071d81a24/tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4", size = 229894, upload-time = "2024-11-27T22:38:02.094Z" },
+ { url = "https://files.pythonhosted.org/packages/55/18/5d8bc5b0a0362311ce4d18830a5d28943667599a60d20118074ea1b01bb7/tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106", size = 245319, upload-time = "2024-11-27T22:38:03.206Z" },
+ { url = "https://files.pythonhosted.org/packages/92/a3/7ade0576d17f3cdf5ff44d61390d4b3febb8a9fc2b480c75c47ea048c646/tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8", size = 238273, upload-time = "2024-11-27T22:38:04.217Z" },
+ { url = "https://files.pythonhosted.org/packages/72/6f/fa64ef058ac1446a1e51110c375339b3ec6be245af9d14c87c4a6412dd32/tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff", size = 98310, upload-time = "2024-11-27T22:38:05.908Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/1c/4a2dcde4a51b81be3530565e92eda625d94dafb46dbeb15069df4caffc34/tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b", size = 108309, upload-time = "2024-11-27T22:38:06.812Z" },
+ { url = "https://files.pythonhosted.org/packages/52/e1/f8af4c2fcde17500422858155aeb0d7e93477a0d59a98e56cbfe75070fd0/tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea", size = 132762, upload-time = "2024-11-27T22:38:07.731Z" },
+ { url = "https://files.pythonhosted.org/packages/03/b8/152c68bb84fc00396b83e7bbddd5ec0bd3dd409db4195e2a9b3e398ad2e3/tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8", size = 123453, upload-time = "2024-11-27T22:38:09.384Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/d6/fc9267af9166f79ac528ff7e8c55c8181ded34eb4b0e93daa767b8841573/tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192", size = 233486, upload-time = "2024-11-27T22:38:10.329Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/51/51c3f2884d7bab89af25f678447ea7d297b53b5a3b5730a7cb2ef6069f07/tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222", size = 242349, upload-time = "2024-11-27T22:38:11.443Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/df/bfa89627d13a5cc22402e441e8a931ef2108403db390ff3345c05253935e/tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77", size = 252159, upload-time = "2024-11-27T22:38:13.099Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/6e/fa2b916dced65763a5168c6ccb91066f7639bdc88b48adda990db10c8c0b/tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6", size = 237243, upload-time = "2024-11-27T22:38:14.766Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/04/885d3b1f650e1153cbb93a6a9782c58a972b94ea4483ae4ac5cedd5e4a09/tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd", size = 259645, upload-time = "2024-11-27T22:38:15.843Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/de/6b432d66e986e501586da298e28ebeefd3edc2c780f3ad73d22566034239/tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e", size = 244584, upload-time = "2024-11-27T22:38:17.645Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/9a/47c0449b98e6e7d1be6cbac02f93dd79003234ddc4aaab6ba07a9a7482e2/tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98", size = 98875, upload-time = "2024-11-27T22:38:19.159Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/60/9b9638f081c6f1261e2688bd487625cd1e660d0a85bd469e91d8db969734/tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4", size = 109418, upload-time = "2024-11-27T22:38:20.064Z" },
+ { url = "https://files.pythonhosted.org/packages/04/90/2ee5f2e0362cb8a0b6499dc44f4d7d48f8fff06d28ba46e6f1eaa61a1388/tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7", size = 132708, upload-time = "2024-11-27T22:38:21.659Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/ec/46b4108816de6b385141f082ba99e315501ccd0a2ea23db4a100dd3990ea/tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c", size = 123582, upload-time = "2024-11-27T22:38:22.693Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/bd/b470466d0137b37b68d24556c38a0cc819e8febe392d5b199dcd7f578365/tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13", size = 232543, upload-time = "2024-11-27T22:38:24.367Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/e5/82e80ff3b751373f7cead2815bcbe2d51c895b3c990686741a8e56ec42ab/tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281", size = 241691, upload-time = "2024-11-27T22:38:26.081Z" },
+ { url = "https://files.pythonhosted.org/packages/05/7e/2a110bc2713557d6a1bfb06af23dd01e7dde52b6ee7dadc589868f9abfac/tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272", size = 251170, upload-time = "2024-11-27T22:38:27.921Z" },
+ { url = "https://files.pythonhosted.org/packages/64/7b/22d713946efe00e0adbcdfd6d1aa119ae03fd0b60ebed51ebb3fa9f5a2e5/tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140", size = 236530, upload-time = "2024-11-27T22:38:29.591Z" },
+ { url = "https://files.pythonhosted.org/packages/38/31/3a76f67da4b0cf37b742ca76beaf819dca0ebef26d78fc794a576e08accf/tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2", size = 258666, upload-time = "2024-11-27T22:38:30.639Z" },
+ { url = "https://files.pythonhosted.org/packages/07/10/5af1293da642aded87e8a988753945d0cf7e00a9452d3911dd3bb354c9e2/tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744", size = 243954, upload-time = "2024-11-27T22:38:31.702Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/b9/1ed31d167be802da0fc95020d04cd27b7d7065cc6fbefdd2f9186f60d7bd/tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec", size = 98724, upload-time = "2024-11-27T22:38:32.837Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/32/b0963458706accd9afcfeb867c0f9175a741bf7b19cd424230714d722198/tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69", size = 109383, upload-time = "2024-11-27T22:38:34.455Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/c2/61d3e0f47e2b74ef40a68b9e6ad5984f6241a942f7cd3bbfbdbd03861ea9/tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc", size = 14257, upload-time = "2024-11-27T22:38:35.385Z" },
+]
+
+[[package]]
+name = "torch"
+version = "2.7.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "filelock", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "fsspec", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "jinja2", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "networkx", version = "3.5", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "setuptools", marker = "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "sympy", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "typing-extensions", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6a/27/2e06cb52adf89fe6e020963529d17ed51532fc73c1e6d1b18420ef03338c/torch-2.7.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a103b5d782af5bd119b81dbcc7ffc6fa09904c423ff8db397a1e6ea8fd71508f", size = 99089441, upload-time = "2025-06-04T17:38:48.268Z" },
+ { url = "https://files.pythonhosted.org/packages/92/f6/5da3918414e07da9866ecb9330fe6ffdebe15cb9a4c5ada7d4b6e0a6654d/torch-2.7.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:d72acfdb86cee2a32c0ce0101606f3758f0d8bb5f8f31e7920dc2809e963aa7c", size = 68630914, upload-time = "2025-06-04T17:39:31.162Z" },
+ { url = "https://files.pythonhosted.org/packages/11/56/2eae3494e3d375533034a8e8cf0ba163363e996d85f0629441fa9d9843fe/torch-2.7.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:236f501f2e383f1cb861337bdf057712182f910f10aeaf509065d54d339e49b2", size = 99093039, upload-time = "2025-06-04T17:39:06.963Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/2b/d36d57c66ff031f93b4fa432e86802f84991477e522adcdffd314454326b/torch-2.7.1-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:aea4fc1bf433d12843eb2c6b2204861f43d8364597697074c8d38ae2507f8730", size = 68640034, upload-time = "2025-06-04T17:39:17.989Z" },
+ { url = "https://files.pythonhosted.org/packages/87/93/fb505a5022a2e908d81fe9a5e0aa84c86c0d5f408173be71c6018836f34e/torch-2.7.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:27ea1e518df4c9de73af7e8a720770f3628e7f667280bce2be7a16292697e3fa", size = 98948276, upload-time = "2025-06-04T17:39:12.852Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/60/04b77281c730bb13460628e518c52721257814ac6c298acd25757f6a175c/torch-2.7.1-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:787687087412c4bd68d315e39bc1223f08aae1d16a9e9771d95eabbb04ae98fb", size = 68645146, upload-time = "2025-06-04T17:38:52.97Z" },
+ { url = "https://files.pythonhosted.org/packages/66/81/e48c9edb655ee8eb8c2a6026abdb6f8d2146abd1f150979ede807bb75dcb/torch-2.7.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:03563603d931e70722dce0e11999d53aa80a375a3d78e6b39b9f6805ea0a8d28", size = 98946649, upload-time = "2025-06-04T17:38:43.031Z" },
+ { url = "https://files.pythonhosted.org/packages/95/bf/e086ee36ddcef9299f6e708d3b6c8487c1651787bb9ee2939eb2a7f74911/torch-2.7.1-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0da4f4dba9f65d0d203794e619fe7ca3247a55ffdcbd17ae8fb83c8b2dc9b585", size = 68925988, upload-time = "2025-06-04T17:38:29.273Z" },
+ { url = "https://files.pythonhosted.org/packages/69/6a/67090dcfe1cf9048448b31555af6efb149f7afa0a310a366adbdada32105/torch-2.7.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:e08d7e6f21a617fe38eeb46dd2213ded43f27c072e9165dc27300c9ef9570934", size = 99028857, upload-time = "2025-06-04T17:37:50.956Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/29/beb45cdf5c4fc3ebe282bf5eafc8dfd925ead7299b3c97491900fe5ed844/torch-2.7.1-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:988b0cbc4333618a1056d2ebad9eb10089637b659eb645434d0809d8d937b946", size = 68645708, upload-time = "2025-06-04T17:34:39.852Z" },
+]
+
+[[package]]
+name = "torch"
+version = "2.8.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "filelock", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "fsspec", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "jinja2", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "networkx", version = "3.4.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and platform_machine != 'aarch64') or (python_full_version < '3.11' and 'tegra' not in platform_release)" },
+ { name = "networkx", version = "3.5", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and platform_machine != 'aarch64') or (python_full_version >= '3.11' and 'tegra' not in platform_release)" },
+ { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cuda-runtime-cu12", version = "12.8.90", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cufile-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "setuptools", marker = "(python_full_version >= '3.12' and platform_machine != 'aarch64') or (python_full_version >= '3.12' and 'tegra' not in platform_release)" },
+ { name = "sympy", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
+ { name = "typing-extensions", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/63/28/110f7274254f1b8476c561dada127173f994afa2b1ffc044efb773c15650/torch-2.8.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:0be92c08b44009d4131d1ff7a8060d10bafdb7ddcb7359ef8d8c5169007ea905", size = 102052793, upload-time = "2025-08-06T14:53:15.852Z" },
+ { url = "https://files.pythonhosted.org/packages/70/1c/58da560016f81c339ae14ab16c98153d51c941544ae568da3cb5b1ceb572/torch-2.8.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:89aa9ee820bb39d4d72b794345cccef106b574508dd17dbec457949678c76011", size = 888025420, upload-time = "2025-08-06T14:54:18.014Z" },
+ { url = "https://files.pythonhosted.org/packages/70/87/f69752d0dd4ba8218c390f0438130c166fa264a33b7025adb5014b92192c/torch-2.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:e8e5bf982e87e2b59d932769938b698858c64cc53753894be25629bdf5cf2f46", size = 241363614, upload-time = "2025-08-06T14:53:31.496Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/d6/e6d4c57e61c2b2175d3aafbfb779926a2cfd7c32eeda7c543925dceec923/torch-2.8.0-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:a3f16a58a9a800f589b26d47ee15aca3acf065546137fc2af039876135f4c760", size = 73611154, upload-time = "2025-08-06T14:53:10.919Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/c4/3e7a3887eba14e815e614db70b3b529112d1513d9dae6f4d43e373360b7f/torch-2.8.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:220a06fd7af8b653c35d359dfe1aaf32f65aa85befa342629f716acb134b9710", size = 102073391, upload-time = "2025-08-06T14:53:20.937Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/63/4fdc45a0304536e75a5e1b1bbfb1b56dd0e2743c48ee83ca729f7ce44162/torch-2.8.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c12fa219f51a933d5f80eeb3a7a5d0cbe9168c0a14bbb4055f1979431660879b", size = 888063640, upload-time = "2025-08-06T14:55:05.325Z" },
+ { url = "https://files.pythonhosted.org/packages/84/57/2f64161769610cf6b1c5ed782bd8a780e18a3c9d48931319f2887fa9d0b1/torch-2.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:8c7ef765e27551b2fbfc0f41bcf270e1292d9bf79f8e0724848b1682be6e80aa", size = 241366752, upload-time = "2025-08-06T14:53:38.692Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/5e/05a5c46085d9b97e928f3f037081d3d2b87fb4b4195030fc099aaec5effc/torch-2.8.0-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:5ae0524688fb6707c57a530c2325e13bb0090b745ba7b4a2cd6a3ce262572916", size = 73621174, upload-time = "2025-08-06T14:53:25.44Z" },
+ { url = "https://files.pythonhosted.org/packages/49/0c/2fd4df0d83a495bb5e54dca4474c4ec5f9c62db185421563deeb5dabf609/torch-2.8.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:e2fab4153768d433f8ed9279c8133a114a034a61e77a3a104dcdf54388838705", size = 101906089, upload-time = "2025-08-06T14:53:52.631Z" },
+ { url = "https://files.pythonhosted.org/packages/99/a8/6acf48d48838fb8fe480597d98a0668c2beb02ee4755cc136de92a0a956f/torch-2.8.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2aca0939fb7e4d842561febbd4ffda67a8e958ff725c1c27e244e85e982173c", size = 887913624, upload-time = "2025-08-06T14:56:44.33Z" },
+ { url = "https://files.pythonhosted.org/packages/af/8a/5c87f08e3abd825c7dfecef5a0f1d9aa5df5dd0e3fd1fa2f490a8e512402/torch-2.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:2f4ac52f0130275d7517b03a33d2493bab3693c83dcfadf4f81688ea82147d2e", size = 241326087, upload-time = "2025-08-06T14:53:46.503Z" },
+ { url = "https://files.pythonhosted.org/packages/be/66/5c9a321b325aaecb92d4d1855421e3a055abd77903b7dab6575ca07796db/torch-2.8.0-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:619c2869db3ada2c0105487ba21b5008defcc472d23f8b80ed91ac4a380283b0", size = 73630478, upload-time = "2025-08-06T14:53:57.144Z" },
+ { url = "https://files.pythonhosted.org/packages/10/4e/469ced5a0603245d6a19a556e9053300033f9c5baccf43a3d25ba73e189e/torch-2.8.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2b2f96814e0345f5a5aed9bf9734efa913678ed19caf6dc2cddb7930672d6128", size = 101936856, upload-time = "2025-08-06T14:54:01.526Z" },
+ { url = "https://files.pythonhosted.org/packages/16/82/3948e54c01b2109238357c6f86242e6ecbf0c63a1af46906772902f82057/torch-2.8.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:65616ca8ec6f43245e1f5f296603e33923f4c30f93d65e103d9e50c25b35150b", size = 887922844, upload-time = "2025-08-06T14:55:50.78Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/54/941ea0a860f2717d86a811adf0c2cd01b3983bdd460d0803053c4e0b8649/torch-2.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:659df54119ae03e83a800addc125856effda88b016dfc54d9f65215c3975be16", size = 241330968, upload-time = "2025-08-06T14:54:45.293Z" },
+ { url = "https://files.pythonhosted.org/packages/de/69/8b7b13bba430f5e21d77708b616f767683629fc4f8037564a177d20f90ed/torch-2.8.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:1a62a1ec4b0498930e2543535cf70b1bef8c777713de7ceb84cd79115f553767", size = 73915128, upload-time = "2025-08-06T14:54:34.769Z" },
+ { url = "https://files.pythonhosted.org/packages/15/0e/8a800e093b7f7430dbaefa80075aee9158ec22e4c4fc3c1a66e4fb96cb4f/torch-2.8.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:83c13411a26fac3d101fe8035a6b0476ae606deb8688e904e796a3534c197def", size = 102020139, upload-time = "2025-08-06T14:54:39.047Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/15/5e488ca0bc6162c86a33b58642bc577c84ded17c7b72d97e49b5833e2d73/torch-2.8.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:8f0a9d617a66509ded240add3754e462430a6c1fc5589f86c17b433dd808f97a", size = 887990692, upload-time = "2025-08-06T14:56:18.286Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/a8/6a04e4b54472fc5dba7ca2341ab219e529f3c07b6941059fbf18dccac31f/torch-2.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:a7242b86f42be98ac674b88a4988643b9bc6145437ec8f048fea23f72feb5eca", size = 241603453, upload-time = "2025-08-06T14:55:22.945Z" },
+ { url = "https://files.pythonhosted.org/packages/04/6e/650bb7f28f771af0cb791b02348db8b7f5f64f40f6829ee82aa6ce99aabe/torch-2.8.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:7b677e17f5a3e69fdef7eb3b9da72622f8d322692930297e4ccb52fefc6c8211", size = 73632395, upload-time = "2025-08-06T14:55:28.645Z" },
+]
+
+[[package]]
+name = "torch-inference-optimized"
+version = "1.0.0"
+source = { editable = "." }
+dependencies = [
+ { name = "aiohttp" },
+ { name = "anyio" },
+ { name = "click" },
+ { name = "fastapi" },
+ { name = "huggingface-hub" },
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
+ { name = "onnx" },
+ { name = "onnxruntime" },
+ { name = "onnxsim" },
+ { name = "opencv-python" },
+ { name = "pillow" },
+ { name = "psutil" },
+ { name = "pydantic" },
+ { name = "pyyaml" },
+ { name = "requests" },
+ { name = "torch", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torchaudio", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torchaudio", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torchvision", version = "0.22.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torchvision", version = "0.23.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "tqdm" },
+ { name = "uvicorn" },
+]
+
+[package.optional-dependencies]
+all = [
+ { name = "bandit", extra = ["toml"] },
+ { name = "black" },
+ { name = "mkdocs" },
+ { name = "mkdocs-material" },
+ { name = "mkdocstrings", extra = ["python"] },
+ { name = "mypy" },
+ { name = "onnxruntime-gpu" },
+ { name = "pre-commit" },
+ { name = "pytest" },
+ { name = "pytest-asyncio" },
+ { name = "pytest-benchmark" },
+ { name = "pytest-cov" },
+ { name = "pytest-html" },
+ { name = "pytest-json-report" },
+ { name = "pytest-mock" },
+ { name = "pytest-timeout" },
+ { name = "pytest-xdist" },
+ { name = "ruff" },
+ { name = "safety" },
+ { name = "tensorrt", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torch", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torch-tensorrt", version = "2.7.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch-tensorrt", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torchaudio", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torchaudio", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torchvision", version = "0.22.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torchvision", version = "0.23.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "tox" },
+]
+cuda = [
+ { name = "onnxruntime-gpu" },
+ { name = "torch", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torchaudio", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torchaudio", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torchvision", version = "0.22.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torchvision", version = "0.23.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+dev = [
+ { name = "bandit", extra = ["toml"] },
+ { name = "black" },
+ { name = "mypy" },
+ { name = "pre-commit" },
+ { name = "pytest" },
+ { name = "pytest-asyncio" },
+ { name = "pytest-benchmark" },
+ { name = "pytest-cov" },
+ { name = "pytest-html" },
+ { name = "pytest-json-report" },
+ { name = "pytest-mock" },
+ { name = "pytest-timeout" },
+ { name = "pytest-xdist" },
+ { name = "ruff" },
+ { name = "safety" },
+ { name = "tox" },
+]
+docs = [
+ { name = "mkdocs" },
+ { name = "mkdocs-material" },
+ { name = "mkdocstrings", extra = ["python"] },
+]
+tensorrt = [
+ { name = "tensorrt", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torch-tensorrt", version = "2.7.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch-tensorrt", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+
+[package.dev-dependencies]
+dev = [
+ { name = "bandit", extra = ["toml"] },
+ { name = "black" },
+ { name = "mypy" },
+ { name = "pre-commit" },
+ { name = "pytest" },
+ { name = "pytest-asyncio" },
+ { name = "pytest-benchmark" },
+ { name = "pytest-cov" },
+ { name = "pytest-html" },
+ { name = "pytest-json-report" },
+ { name = "pytest-mock" },
+ { name = "pytest-timeout" },
+ { name = "pytest-xdist" },
+ { name = "ruff" },
+ { name = "safety" },
+ { name = "tox" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "aiohttp", specifier = ">=3.11.0" },
+ { name = "anyio", specifier = ">=4.8.0" },
+ { name = "bandit", extras = ["toml"], marker = "extra == 'dev'", specifier = ">=1.7.0" },
+ { name = "black", marker = "extra == 'dev'", specifier = ">=23.0.0" },
+ { name = "click", specifier = ">=8.1.0" },
+ { name = "fastapi", specifier = ">=0.115.0" },
+ { name = "huggingface-hub", specifier = ">=0.29.0" },
+ { name = "mkdocs", marker = "extra == 'docs'", specifier = ">=1.5.0" },
+ { name = "mkdocs-material", marker = "extra == 'docs'", specifier = ">=9.0.0" },
+ { name = "mkdocstrings", extras = ["python"], marker = "extra == 'docs'", specifier = ">=0.24.0" },
+ { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.0.0" },
+ { name = "numpy", specifier = ">=2.1.0" },
+ { name = "onnx", specifier = ">=1.14.0" },
+ { name = "onnxruntime", specifier = ">=1.16.0" },
+ { name = "onnxruntime-gpu", marker = "extra == 'cuda'", specifier = ">=1.16.0" },
+ { name = "onnxsim", specifier = ">=0.4.0" },
+ { name = "opencv-python", specifier = ">=4.11.0" },
+ { name = "pillow", specifier = ">=11.0.0" },
+ { name = "pre-commit", marker = "extra == 'dev'", specifier = ">=3.0.0" },
+ { name = "psutil", specifier = ">=7.0.0" },
+ { name = "pydantic", specifier = ">=2.10.0" },
+ { name = "pytest", marker = "extra == 'dev'", specifier = ">=7.0.0" },
+ { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.21.0" },
+ { name = "pytest-benchmark", marker = "extra == 'dev'", specifier = ">=4.0.0" },
+ { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=4.1.0" },
+ { name = "pytest-html", marker = "extra == 'dev'", specifier = ">=3.2.0" },
+ { name = "pytest-json-report", marker = "extra == 'dev'", specifier = ">=1.5.0" },
+ { name = "pytest-mock", marker = "extra == 'dev'", specifier = ">=3.11.0" },
+ { name = "pytest-timeout", marker = "extra == 'dev'", specifier = ">=2.1.0" },
+ { name = "pytest-xdist", marker = "extra == 'dev'", specifier = ">=3.3.0" },
+ { name = "pyyaml", specifier = ">=6.0.0" },
+ { name = "requests", specifier = ">=2.32.0" },
+ { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.1.0" },
+ { name = "safety", marker = "extra == 'dev'", specifier = ">=2.0.0" },
+ { name = "tensorrt", marker = "extra == 'tensorrt'", specifier = ">=10.7.0" },
+ { name = "torch", specifier = ">=2.6.0" },
+ { name = "torch", marker = "extra == 'cuda'", specifier = ">=2.6.0" },
+ { name = "torch-inference-optimized", extras = ["cuda", "tensorrt", "dev", "docs"], marker = "extra == 'all'" },
+ { name = "torch-tensorrt", marker = "extra == 'tensorrt'", specifier = ">=2.6.0" },
+ { name = "torchaudio", specifier = ">=2.5.0" },
+ { name = "torchaudio", marker = "extra == 'cuda'", specifier = ">=2.5.0" },
+ { name = "torchvision", specifier = ">=0.20.0" },
+ { name = "torchvision", marker = "extra == 'cuda'", specifier = ">=0.20.0" },
+ { name = "tox", marker = "extra == 'dev'", specifier = ">=4.0.0" },
+ { name = "tqdm", specifier = ">=4.67.0" },
+ { name = "uvicorn", specifier = ">=0.34.0" },
+]
+provides-extras = ["cuda", "tensorrt", "dev", "docs", "all"]
+
+[package.metadata.requires-dev]
+dev = [
+ { name = "bandit", extras = ["toml"], specifier = ">=1.7.0" },
+ { name = "black", specifier = ">=23.0.0" },
+ { name = "mypy", specifier = ">=1.0.0" },
+ { name = "pre-commit", specifier = ">=3.0.0" },
+ { name = "pytest", specifier = ">=7.0.0" },
+ { name = "pytest-asyncio", specifier = ">=0.21.0" },
+ { name = "pytest-benchmark", specifier = ">=4.0.0" },
+ { name = "pytest-cov", specifier = ">=4.1.0" },
+ { name = "pytest-html", specifier = ">=3.2.0" },
+ { name = "pytest-json-report", specifier = ">=1.5.0" },
+ { name = "pytest-mock", specifier = ">=3.11.0" },
+ { name = "pytest-timeout", specifier = ">=2.1.0" },
+ { name = "pytest-xdist", specifier = ">=3.3.0" },
+ { name = "ruff", specifier = ">=0.1.0" },
+ { name = "safety", specifier = ">=2.0.0" },
+ { name = "tox", specifier = ">=4.0.0" },
+]
+
+[[package]]
+name = "torch-tensorrt"
+version = "2.7.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "packaging", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt-cu12", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt-cu12-bindings", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "tensorrt-cu12-libs", version = "10.9.0.34", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "typing-extensions", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+
+[[package]]
+name = "torch-tensorrt"
+version = "2.8.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "dllist", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and platform_machine != 'aarch64') or (python_full_version < '3.11' and 'tegra' not in platform_release)" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and platform_machine != 'aarch64') or (python_full_version >= '3.11' and 'tegra' not in platform_release)" },
+ { name = "packaging", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "tensorrt", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "tensorrt-cu12-bindings", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "tensorrt-cu12-libs", version = "10.12.0.36", source = { registry = "https://pypi.nvidia.com/" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "typing-extensions", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3e/f9/cd4d172738a14918bc3e7b415ee0bdf2f9cc953424c54d42bf38da5dc56b/torch_tensorrt-2.8.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_34_x86_64.whl", hash = "sha256:e4400ec62674d5884d19dcf437a7c084f3b235632e40d48d55e30be7b707ce31", size = 15039215, upload-time = "2025-08-09T06:02:00.436Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/8a/54a79f2959144d7e3f226fee90b71e9b658d1cdd1eafed60bd472d54d67a/torch_tensorrt-2.8.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6adb18682d42af150bdabbf10ac6b5dcbab844574b4fe94ecbb383bc9d5873c5", size = 3524433, upload-time = "2025-08-09T06:02:03.826Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/06/40ae2dcf8dc650ceb55aa6fad909741292360c4bcb42468f0c190bcbfff2/torch_tensorrt-2.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:5111fec36da7937b9baf7ef40ae04455f869447072d9c93859e80e19e0a76296", size = 1824938, upload-time = "2025-08-09T06:02:05.99Z" },
+ { url = "https://files.pythonhosted.org/packages/24/ff/9ddab25fc8bea6537245f86c861f5e0e2c88a26f5e5c7f2dca038af145d7/torch_tensorrt-2.8.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_34_x86_64.whl", hash = "sha256:d6fba1e7fa155f73d157b9207069449a31ad18a075efa81cef8048dac138af9a", size = 15077169, upload-time = "2025-08-09T06:02:11.484Z" },
+ { url = "https://files.pythonhosted.org/packages/57/6a/d2aec3fae13017a9b32c773d842c828a45b238866d7457c3b8d0e4c5a3b8/torch_tensorrt-2.8.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:84a2bfb377677ff76463393e3b0a51e6ddac50ee4ba8856c2cdbd6e8cfedd94c", size = 3527295, upload-time = "2025-08-09T06:02:14.207Z" },
+ { url = "https://files.pythonhosted.org/packages/14/f9/c8e033330605f752caff5932fbb4b5df2f71d1fd9f98efa3cf8068090ed5/torch_tensorrt-2.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:890441ac87c9e880c409e7ae42c8d522c6ae69eeaa4125985d6399ada7043840", size = 1826853, upload-time = "2025-08-09T06:02:16.093Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/77/41c4366692f51e974391eddd9b38d24a8b7b48dfa26d92352e629e7cbac9/torch_tensorrt-2.8.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_34_x86_64.whl", hash = "sha256:7a2e7c6984e6c7a4b228bc25bf1bfc96c64ab2913f5477e12967ccdb28ced223", size = 15091182, upload-time = "2025-08-09T06:02:31.286Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/92/975eb4b3cc8a29afe81f3a029be1f19d39af46d047336af7d061c8103dfd/torch_tensorrt-2.8.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:70e6c642a570ba02bbb645d7bbec060528c001af7553b65322a849788e9338d9", size = 3525858, upload-time = "2025-08-09T06:02:35.171Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/bd/871d8556fddd36fbe8dbd3eb4a30be4375a21f5b2ed5bfd51f2e0dd843cd/torch_tensorrt-2.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:2712f076e4f61adaaff3eecefea4e14d7ca85065eb5080cee4b5eeb46d27c1d4", size = 1827222, upload-time = "2025-08-09T06:02:37.612Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/36/c91bd9b502c4effe9d6fecffbd8251a04d38264b5289df0f3e011b08ca07/torch_tensorrt-2.8.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_34_x86_64.whl", hash = "sha256:0df341e72bec335bdf236506cee22530146a3c2a2bcf224e82a8a83e8d2d14f8", size = 15081324, upload-time = "2025-08-09T06:02:43.15Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/5d/0dd70645eb720a4bb285c7ac10430248b6ab3bd920af54dc26ee652683bb/torch_tensorrt-2.8.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:be123e48d6c4549a558e3b6b37640b22a967d8da5db49ae3cf64c743b93fae74", size = 3518730, upload-time = "2025-08-09T06:02:46.272Z" },
+ { url = "https://files.pythonhosted.org/packages/19/d8/01a65106f9e52ddd3e4d9cf6e6aa99703dc6ec1037b35781d96eb5289471/torch_tensorrt-2.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:88509127a00fc93c9f83a123d0abf6ff7b56100005866d301d9a0b160ccbaee1", size = 1827298, upload-time = "2025-08-09T06:02:48.786Z" },
+]
+
+[[package]]
+name = "torchaudio"
+version = "2.7.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "torch", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/da/71/bfc6d2b28ede6c4c5446901cfa4d98fa25b2606eb12e641baccec16fcde0/torchaudio-2.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4739af57d0eb94347d1c6a1b5668be78a7383afe826dde18a04883b9f9f263b1", size = 1842457, upload-time = "2025-06-04T17:44:12.073Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/8c/35eea5138ccd4abf38b163743d5ab4a8b25349bafa8bdf3d629e7f3036b9/torchaudio-2.7.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c089dbfc14c5f47091b7bf3f6bf2bbac93b86619299d04d9c102f4ad53758990", size = 1680682, upload-time = "2025-06-04T17:44:11.056Z" },
+ { url = "https://files.pythonhosted.org/packages/85/a2/52e6760d352584ae1ab139d97647bdc51d1eb7d480b688fe69c72616c956/torchaudio-2.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5a62f88c629035913f506df03f710c48fc8bb9637191933f27c67088d5ca136", size = 1849254, upload-time = "2025-06-04T17:44:05.392Z" },
+ { url = "https://files.pythonhosted.org/packages/df/e6/0f3835895f9d0b8900ca4a7196932b13b74156ad9ffb76e7aacfc5bb4157/torchaudio-2.7.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:53bc4ba12e7468be34a7ca2ee837ee5c8bd5755b25c12f665af9339cae37e265", size = 1686156, upload-time = "2025-06-04T17:44:09.39Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/d1/eb8bc3b3502dddb1b789567b7b19668b1d32817266887b9f381494cfe463/torchaudio-2.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9306dcfc4586cebd7647a93fe9a448e791c4f83934da616b9433b75597a1f978", size = 1846897, upload-time = "2025-06-04T17:44:07.79Z" },
+ { url = "https://files.pythonhosted.org/packages/62/7d/6c15f15d3edc5271abc808f70713644b50f0f7bfb85a09dba8b5735fbad3/torchaudio-2.7.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:d66bd76b226fdd4135c97650e1b7eb63fb7659b4ed0e3a778898e41dbba21b61", size = 1686680, upload-time = "2025-06-04T17:43:58.986Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/ee/6e308868b9467e1b51da9d781cb73dd5aadca7c8b6256f88ce5d18a7fb77/torchaudio-2.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e5f0599a507f4683546878ed9667e1b32d7ca3c8a957e4c15c6b302378ef4dee", size = 1847208, upload-time = "2025-06-04T17:44:01.365Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/f9/ca0e0960526e6deaa476d168b877480a3fbae5d44668a54de963a9800097/torchaudio-2.7.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:271f717844e5c7f9e05c8328de817bf90f46d83281c791e94f54d4edea2f5817", size = 1686311, upload-time = "2025-06-04T17:44:02.785Z" },
+ { url = "https://files.pythonhosted.org/packages/73/5e/da52d2fa9f7cc89512b63dd8a88fb3e097a89815f440cc16159b216ec611/torchaudio-2.7.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:18560955b8beb2a8d39a6bfae20a442337afcefb3dfd4ee007ce82233a796799", size = 1929983, upload-time = "2025-06-04T17:43:56.659Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/16/9d03dc62613f276f9666eb0609164287df23986b67d20b53e78d21a3d8d8/torchaudio-2.7.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:1850475ef9101ea0b3593fe93ff6ee4e7a20598f6da6510761220b9fe56eb7fa", size = 1700436, upload-time = "2025-06-04T17:43:55.589Z" },
+]
+
+[[package]]
+name = "torchaudio"
+version = "2.8.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/30/81/92d34ff136b17ddda872f6d8149f2ca927ad53a37ae26d02cb5f66435772/torchaudio-2.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c2f44cf279f673cfcdd8f576c349eee8bedf8caab351a5dd78b32970cc34a212", size = 1852315, upload-time = "2025-08-06T14:58:32.64Z" },
+ { url = "https://files.pythonhosted.org/packages/95/c8/e46c22a3c059844bb0f1b670317c9e538b51728558326dcd9e5fffbf2ec2/torchaudio-2.8.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:d3c1b85b26a09832d139f6d6da6b66caeb51d2e16e08f8587665c44a9e1aa8f9", size = 1685620, upload-time = "2025-08-06T14:58:34.045Z" },
+ { url = "https://files.pythonhosted.org/packages/d8/f5/69db76b564263f22c1788cc298ab1c4e2391a79fa8ba7b4a3e76d945292a/torchaudio-2.8.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:58f912bf2d289c709b42a55475b2b483becec79d9affb7684b606bb1f896b434", size = 4001714, upload-time = "2025-08-06T14:58:38.951Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/18/63adf60a9f0592c6dcea2b37735990881edbbe671e3af3ae79f2da816a50/torchaudio-2.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:4e2b4712ad6d7547ce82d84567c8c29d5e2966ff1d31d94e1644024fb4b2649f", size = 2500313, upload-time = "2025-08-06T14:58:42.441Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/bf/6b01ef3defb8d0a772c863588711e9b2b011c27d6b37c1b9d15a359c8442/torchaudio-2.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c9276857d241c6de257af765c0f51fc011af38cb725401495121b280913007cf", size = 1859094, upload-time = "2025-08-06T14:58:35.078Z" },
+ { url = "https://files.pythonhosted.org/packages/75/ca/da5d0a3bb7d114a8b590ecce14859ea0a05102bb4de68cdd1ed7a90634d6/torchaudio-2.8.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:4573c6042950c20278e3608a9a38050ba0bc72e0049e1bbfd249caf859a8029b", size = 1692033, upload-time = "2025-08-06T14:58:37.393Z" },
+ { url = "https://files.pythonhosted.org/packages/b6/ef/62ac736d8f906cc414181050e08a495a637dab985186c34bd76ea37efbc0/torchaudio-2.8.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:776c0b4ba84b9e3ddf6304b9c47cd63549d7896a6f3d5184ece074cc3d76ed6b", size = 4011716, upload-time = "2025-08-06T14:58:40.138Z" },
+ { url = "https://files.pythonhosted.org/packages/14/86/015337c8434abc604b8680371df783f66c421a7f211cbe40a374b0540b6d/torchaudio-2.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:078105bf80f725c0215a0bebac8cb2fb1b3993ab32bdc3fcd50145a5b4127001", size = 2505194, upload-time = "2025-08-06T14:58:57.301Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/cc/c2e2a3eb6ee956f73c68541e439916f8146170ea9cc61e72adea5c995312/torchaudio-2.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ddef94bf181e6447cbb05f38beaca8f6c5bb8d2b9ddced1aa3452025b9fc70d3", size = 1856736, upload-time = "2025-08-06T14:58:36.3Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/0d/24dad878784f1edd62862f27173781669f0c71eb46368636787d1e364188/torchaudio-2.8.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:862e2e40bf09d865e5df080a84c1a39bbcef40e43140f4b1737eb3a389d3b38f", size = 1692930, upload-time = "2025-08-06T14:58:41.312Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/a6/84d80f34472503e9eb82245d7df501c59602d75d7360e717fb9b84f91c5e/torchaudio-2.8.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:93a8583f280fe83ba021aa713319381ea71362cc87b67ee38e97a43cb2254aee", size = 4014607, upload-time = "2025-08-06T14:58:47.234Z" },
+ { url = "https://files.pythonhosted.org/packages/43/ab/96ad33afa320738a7cfb4b51ba97e2f3cfb1e04ae3115d5057655103ba4f/torchaudio-2.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:4b82cacd1b8ccd543b1149d8cab257a40dfda8119023d2e3a96c66349c84bffb", size = 2499890, upload-time = "2025-08-06T14:58:55.066Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/ea/2a68259c4dbb5fe44ebfdcfa40b115010d8c677221a7ef0f5577f3c4f5f1/torchaudio-2.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f851d32e94ca05e470f0c60e25726ec1e0eb71cb2ca5a0206b7fd03272ccc3c8", size = 1857045, upload-time = "2025-08-06T14:58:51.984Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/a3/1c79a8ef29fe403b83bdfc033db852bc2a888b80c406325e5c6fb37a7f2d/torchaudio-2.8.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:09535a9b727c0793cd07c1ace99f3f353626281bcc3e30c2f2314e3ebc9d3f96", size = 1692755, upload-time = "2025-08-06T14:58:50.868Z" },
+ { url = "https://files.pythonhosted.org/packages/49/df/61941198e9ac6bcebfdd57e1836e4f3c23409308e3d8d7458f0198a6a366/torchaudio-2.8.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:d2a85b124494736241884372fe1c6dd8c15e9bc1931bd325838c5c00238c7378", size = 4013897, upload-time = "2025-08-06T14:59:01.66Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/ab/7175d35a4bbc4a465a9f1388571842f16eb6dec5069d7ea9c8c2d7b5b401/torchaudio-2.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:c1b5139c840367a7855a062a06688a416619f6fd2ca46d9b9299b49a7d133dfd", size = 2500085, upload-time = "2025-08-06T14:58:44.95Z" },
+ { url = "https://files.pythonhosted.org/packages/34/1a/69b9f8349d9d57953d5e7e445075cbf74000173fb5f5d5d9e9d59415fc63/torchaudio-2.8.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:68df9c9068984edff8065c2b6656725e6114fe89281b0cf122c7505305fc98a4", size = 1935600, upload-time = "2025-08-06T14:58:46.051Z" },
+ { url = "https://files.pythonhosted.org/packages/71/76/40fec21b65bccfdc5c8cdb9d511033ab07a7ad4b05f0a5b07f85c68279fc/torchaudio-2.8.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:1951f10ed092f2dda57634f6a3950ef21c9d9352551aa84a9fccd51bbda18095", size = 1704199, upload-time = "2025-08-06T14:58:43.594Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/53/95c3363413c2f2009f805144160b093a385f641224465fbcd717449c71fb/torchaudio-2.8.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4f7d97494698d98854129349b12061e8c3398d33bd84c929fa9aed5fd1389f73", size = 4020596, upload-time = "2025-08-06T14:59:03.031Z" },
+ { url = "https://files.pythonhosted.org/packages/52/27/7fc2d7435af044ffbe0b9b8e98d99eac096d43f128a5cde23c04825d5dcf/torchaudio-2.8.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d4a715d09ac28c920d031ee1e60ecbc91e8a5079ad8c61c0277e658436c821a6", size = 2549553, upload-time = "2025-08-06T14:59:00.019Z" },
+]
+
+[[package]]
+name = "torchvision"
+version = "0.22.1"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform == 'linux'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release and sys_platform != 'darwin' and sys_platform != 'linux'",
+]
+dependencies = [
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11' and platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "pillow", marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+ { name = "torch", version = "2.7.1", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine == 'aarch64' and 'tegra' in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/15/2c/7b67117b14c6cc84ae3126ca6981abfa3af2ac54eb5252b80d9475fb40df/torchvision-0.22.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3b47d8369ee568c067795c0da0b4078f39a9dfea6f3bc1f3ac87530dfda1dd56", size = 1947825, upload-time = "2025-06-04T17:43:15.523Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/9f/c4dcf1d232b75e28bc37e21209ab2458d6d60235e16163544ed693de54cb/torchvision-0.22.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:990de4d657a41ed71680cd8be2e98ebcab55371f30993dc9bd2e676441f7180e", size = 2512611, upload-time = "2025-06-04T17:43:03.951Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/00/bdab236ef19da050290abc2b5203ff9945c84a1f2c7aab73e8e9c8c85669/torchvision-0.22.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4addf626e2b57fc22fd6d329cf1346d474497672e6af8383b7b5b636fba94a53", size = 1947827, upload-time = "2025-06-04T17:43:10.84Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/d0/18f951b2be3cfe48c0027b349dcc6fde950e3dc95dd83e037e86f284f6fd/torchvision-0.22.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:8b4a53a6067d63adba0c52f2b8dd2290db649d642021674ee43c0c922f0c6a69", size = 2514021, upload-time = "2025-06-04T17:43:07.608Z" },
+ { url = "https://files.pythonhosted.org/packages/02/90/f4e99a5112dc221cf68a485e853cc3d9f3f1787cb950b895f3ea26d1ea98/torchvision-0.22.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:153f1790e505bd6da123e21eee6e83e2e155df05c0fe7d56347303067d8543c5", size = 1947827, upload-time = "2025-06-04T17:43:11.945Z" },
+ { url = "https://files.pythonhosted.org/packages/25/f6/53e65384cdbbe732cc2106bb04f7fb908487e4fb02ae4a1613ce6904a122/torchvision-0.22.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:964414eef19459d55a10e886e2fca50677550e243586d1678f65e3f6f6bac47a", size = 2514576, upload-time = "2025-06-04T17:43:02.707Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/30/fecdd09fb973e963da68207fe9f3d03ec6f39a935516dc2a98397bf495c6/torchvision-0.22.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c3ae3319624c43cc8127020f46c14aa878406781f0899bb6283ae474afeafbf", size = 1947818, upload-time = "2025-06-04T17:42:51.954Z" },
+ { url = "https://files.pythonhosted.org/packages/55/f4/b45f6cd92fa0acfac5e31b8e9258232f25bcdb0709a604e8b8a39d76e411/torchvision-0.22.1-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:4a614a6a408d2ed74208d0ea6c28a2fbb68290e9a7df206c5fef3f0b6865d307", size = 2471597, upload-time = "2025-06-04T17:42:48.838Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/ca/e9a06bd61ee8e04fb4962a3fb524fe6ee4051662db07840b702a9f339b24/torchvision-0.22.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:043d9e35ed69c2e586aff6eb9e2887382e7863707115668ac9d140da58f42cba", size = 2137623, upload-time = "2025-06-04T17:43:05.028Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/c8/2ebe90f18e7ffa2120f5c3eab62aa86923185f78d2d051a455ea91461608/torchvision-0.22.1-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:27142bcc8a984227a6dcf560985e83f52b82a7d3f5fe9051af586a2ccc46ef26", size = 2476561, upload-time = "2025-06-04T17:42:59.691Z" },
+]
+
+[[package]]
+name = "torchvision"
+version = "0.23.0"
+source = { registry = "https://pypi.org/simple" }
+resolution-markers = [
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version >= '3.12' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version >= '3.12' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version == '3.11.*' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version == '3.11.*' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'darwin') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform == 'darwin')",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and 'tegra' not in platform_release and sys_platform == 'linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform != 'darwin') or (python_full_version < '3.11' and platform_machine != 'aarch64' and sys_platform == 'linux') or (python_full_version < '3.11' and 'tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')",
+]
+dependencies = [
+ { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version < '3.11' and platform_machine != 'aarch64') or (python_full_version < '3.11' and 'tegra' not in platform_release)" },
+ { name = "numpy", version = "2.3.2", source = { registry = "https://pypi.org/simple" }, marker = "(python_full_version >= '3.11' and platform_machine != 'aarch64') or (python_full_version >= '3.11' and 'tegra' not in platform_release)" },
+ { name = "pillow", marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+ { name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'aarch64' or 'tegra' not in platform_release" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4d/49/5ad5c3ff4920be0adee9eb4339b4fb3b023a0fc55b9ed8dbc73df92946b8/torchvision-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7266871daca00ad46d1c073e55d972179d12a58fa5c9adec9a3db9bbed71284a", size = 1856885, upload-time = "2025-08-06T14:57:55.024Z" },
+ { url = "https://files.pythonhosted.org/packages/25/44/ddd56d1637bac42a8c5da2c8c440d8a28c431f996dd9790f32dd9a96ca6e/torchvision-0.23.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:31c583ba27426a3a04eca8c05450524105c1564db41be6632f7536ef405a6de2", size = 2394251, upload-time = "2025-08-06T14:58:01.725Z" },
+ { url = "https://files.pythonhosted.org/packages/93/f3/3cdf55bbf0f737304d997561c34ab0176222e0496b6743b0feab5995182c/torchvision-0.23.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3932bf67256f2d095ce90a9f826f6033694c818856f4bb26794cf2ce64253e53", size = 8627497, upload-time = "2025-08-06T14:58:09.317Z" },
+ { url = "https://files.pythonhosted.org/packages/97/90/02afe57c3ef4284c5cf89d3b7ae203829b3a981f72b93a7dd2a3fd2c83c1/torchvision-0.23.0-cp310-cp310-win_amd64.whl", hash = "sha256:83ee5bf827d61a8af14620c0a61d8608558638ac9c3bac8adb7b27138e2147d1", size = 1600760, upload-time = "2025-08-06T14:57:56.783Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/d7/15d3d7bd8d0239211b21673d1bac7bc345a4ad904a8e25bb3fd8a9cf1fbc/torchvision-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49aa20e21f0c2bd458c71d7b449776cbd5f16693dd5807195a820612b8a229b7", size = 1856884, upload-time = "2025-08-06T14:58:00.237Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/14/7b44fe766b7d11e064c539d92a172fa9689a53b69029e24f2f1f51e7dc56/torchvision-0.23.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:01dc33ee24c79148aee7cdbcf34ae8a3c9da1674a591e781577b716d233b1fa6", size = 2395543, upload-time = "2025-08-06T14:58:04.373Z" },
+ { url = "https://files.pythonhosted.org/packages/79/9c/fcb09aff941c8147d9e6aa6c8f67412a05622b0c750bcf796be4c85a58d4/torchvision-0.23.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:35c27941831b653f5101edfe62c03d196c13f32139310519e8228f35eae0e96a", size = 8628388, upload-time = "2025-08-06T14:58:07.802Z" },
+ { url = "https://files.pythonhosted.org/packages/93/40/3415d890eb357b25a8e0a215d32365a88ecc75a283f75c4e919024b22d97/torchvision-0.23.0-cp311-cp311-win_amd64.whl", hash = "sha256:09bfde260e7963a15b80c9e442faa9f021c7e7f877ac0a36ca6561b367185013", size = 1600741, upload-time = "2025-08-06T14:57:59.158Z" },
+ { url = "https://files.pythonhosted.org/packages/df/1d/0ea0b34bde92a86d42620f29baa6dcbb5c2fc85990316df5cb8f7abb8ea2/torchvision-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e0e2c04a91403e8dd3af9756c6a024a1d9c0ed9c0d592a8314ded8f4fe30d440", size = 1856885, upload-time = "2025-08-06T14:58:06.503Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/00/2f6454decc0cd67158c7890364e446aad4b91797087a57a78e72e1a8f8bc/torchvision-0.23.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:6dd7c4d329a0e03157803031bc856220c6155ef08c26d4f5bbac938acecf0948", size = 2396614, upload-time = "2025-08-06T14:58:03.116Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/b5/3e580dcbc16f39a324f3dd71b90edbf02a42548ad44d2b4893cc92b1194b/torchvision-0.23.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4e7d31c43bc7cbecbb1a5652ac0106b436aa66e26437585fc2c4b2cf04d6014c", size = 8627108, upload-time = "2025-08-06T14:58:12.956Z" },
+ { url = "https://files.pythonhosted.org/packages/82/c1/c2fe6d61e110a8d0de2f94276899a2324a8f1e6aee559eb6b4629ab27466/torchvision-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:a2e45272abe7b8bf0d06c405e78521b5757be1bd0ed7e5cd78120f7fdd4cbf35", size = 1600723, upload-time = "2025-08-06T14:57:57.986Z" },
+ { url = "https://files.pythonhosted.org/packages/91/37/45a5b9407a7900f71d61b2b2f62db4b7c632debca397f205fdcacb502780/torchvision-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1c37e325e09a184b730c3ef51424f383ec5745378dc0eca244520aca29722600", size = 1856886, upload-time = "2025-08-06T14:58:05.491Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/da/a06c60fc84fc849377cf035d3b3e9a1c896d52dbad493b963c0f1cdd74d0/torchvision-0.23.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:2f7fd6c15f3697e80627b77934f77705f3bc0e98278b989b2655de01f6903e1d", size = 2353112, upload-time = "2025-08-06T14:58:26.265Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/27/5ce65ba5c9d3b7d2ccdd79892ab86a2f87ac2ca6638f04bb0280321f1a9c/torchvision-0.23.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:a76fafe113b2977be3a21bf78f115438c1f88631d7a87203acb3dd6ae55889e6", size = 8627658, upload-time = "2025-08-06T14:58:15.999Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/e4/028a27b60aa578a2fa99d9d7334ff1871bb17008693ea055a2fdee96da0d/torchvision-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:07d069cb29691ff566e3b7f11f20d91044f079e1dbdc9d72e0655899a9b06938", size = 1600749, upload-time = "2025-08-06T14:58:10.719Z" },
+ { url = "https://files.pythonhosted.org/packages/05/35/72f91ad9ac7c19a849dedf083d347dc1123f0adeb401f53974f84f1d04c8/torchvision-0.23.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:2df618e1143805a7673aaf82cb5720dd9112d4e771983156aaf2ffff692eebf9", size = 2047192, upload-time = "2025-08-06T14:58:11.813Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/9d/406cea60a9eb9882145bcd62a184ee61e823e8e1d550cdc3c3ea866a9445/torchvision-0.23.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:2a3299d2b1d5a7aed2d3b6ffb69c672ca8830671967eb1cee1497bacd82fe47b", size = 2359295, upload-time = "2025-08-06T14:58:17.469Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/f4/34662f71a70fa1e59de99772142f22257ca750de05ccb400b8d2e3809c1d/torchvision-0.23.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:76bc4c0b63d5114aa81281390f8472a12a6a35ce9906e67ea6044e5af4cab60c", size = 8800474, upload-time = "2025-08-06T14:58:22.53Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/f5/b5a2d841a8d228b5dbda6d524704408e19e7ca6b7bb0f24490e081da1fa1/torchvision-0.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b9e2dabf0da9c8aa9ea241afb63a8f3e98489e706b22ac3f30416a1be377153b", size = 1527667, upload-time = "2025-08-06T14:58:14.446Z" },
+]
+
+[[package]]
+name = "tox"
+version = "4.11.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "cachetools" },
+ { name = "chardet" },
+ { name = "colorama" },
+ { name = "filelock" },
+ { name = "packaging" },
+ { name = "platformdirs" },
+ { name = "pluggy" },
+ { name = "pyproject-api" },
+ { name = "tomli", marker = "python_full_version < '3.11'" },
+ { name = "virtualenv" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1a/78/e9672163a341a557b6fb9f6723c771835c763c0de2bccc9f4abc29e0a4a2/tox-4.11.4.tar.gz", hash = "sha256:73a7240778fabf305aeb05ab8ea26e575e042ab5a18d71d0ed13e343a51d6ce1", size = 176168, upload-time = "2023-11-28T04:14:16.034Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cd/88/de28a027acdb3a1b070a8acdff62bd23fdc23ce32acc7ac4b92b088979a4/tox-4.11.4-py3-none-any.whl", hash = "sha256:2adb83d68f27116812b69aa36676a8d6a52249cb0d173649de0e7d0c2e3e7229", size = 153989, upload-time = "2023-11-28T04:14:11.956Z" },
+]
+
+[[package]]
+name = "tqdm"
+version = "4.67.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" },
+]
+
+[[package]]
+name = "triton"
+version = "3.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "setuptools", marker = "(platform_machine != 'aarch64' and sys_platform != 'darwin') or (platform_machine != 'aarch64' and sys_platform == 'linux') or ('tegra' not in platform_release and sys_platform != 'darwin' and sys_platform != 'linux')" },
+]
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/62/ee/0ee5f64a87eeda19bbad9bc54ae5ca5b98186ed00055281fd40fb4beb10e/triton-3.4.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ff2785de9bc02f500e085420273bb5cc9c9bb767584a4aa28d6e360cec70128", size = 155430069, upload-time = "2025-07-30T19:58:21.715Z" },
+ { url = "https://files.pythonhosted.org/packages/7d/39/43325b3b651d50187e591eefa22e236b2981afcebaefd4f2fc0ea99df191/triton-3.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b70f5e6a41e52e48cfc087436c8a28c17ff98db369447bcaff3b887a3ab4467", size = 155531138, upload-time = "2025-07-30T19:58:29.908Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/66/b1eb52839f563623d185f0927eb3530ee4d5ffe9d377cdaf5346b306689e/triton-3.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:31c1d84a5c0ec2c0f8e8a072d7fd150cab84a9c239eaddc6706c081bfae4eb04", size = 155560068, upload-time = "2025-07-30T19:58:37.081Z" },
+ { url = "https://files.pythonhosted.org/packages/30/7b/0a685684ed5322d2af0bddefed7906674f67974aa88b0fae6e82e3b766f6/triton-3.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00be2964616f4c619193cb0d1b29a99bd4b001d7dc333816073f92cf2a8ccdeb", size = 155569223, upload-time = "2025-07-30T19:58:44.017Z" },
+ { url = "https://files.pythonhosted.org/packages/20/63/8cb444ad5cdb25d999b7d647abac25af0ee37d292afc009940c05b82dda0/triton-3.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7936b18a3499ed62059414d7df563e6c163c5e16c3773678a3ee3d417865035d", size = 155659780, upload-time = "2025-07-30T19:58:51.171Z" },
+]
+
+[[package]]
+name = "typer"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "rich" },
+ { name = "shellingham" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c5/8c/7d682431efca5fd290017663ea4588bf6f2c6aad085c7f108c5dbc316e70/typer-0.16.0.tar.gz", hash = "sha256:af377ffaee1dbe37ae9440cb4e8f11686ea5ce4e9bae01b84ae7c63b87f1dd3b", size = 102625, upload-time = "2025-05-26T14:30:31.824Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/42/3efaf858001d2c2913de7f354563e3a3a2f0decae3efe98427125a8f441e/typer-0.16.0-py3-none-any.whl", hash = "sha256:1f79bed11d4d02d4310e3c1b7ba594183bcedb0ac73b27a9e5f28f6fb5b98855", size = 46317, upload-time = "2025-05-26T14:30:30.523Z" },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.14.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload-time = "2025-07-04T13:28:34.16Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload-time = "2025-07-04T13:28:32.743Z" },
+]
+
+[[package]]
+name = "typing-inspection"
+version = "0.4.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.5.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
+]
+
+[[package]]
+name = "uvicorn"
+version = "0.35.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "h11" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" },
+]
+
+[[package]]
+name = "virtualenv"
+version = "20.33.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "distlib" },
+ { name = "filelock" },
+ { name = "platformdirs" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8b/60/4f20960df6c7b363a18a55ab034c8f2bcd5d9770d1f94f9370ec104c1855/virtualenv-20.33.1.tar.gz", hash = "sha256:1b44478d9e261b3fb8baa5e74a0ca3bc0e05f21aa36167bf9cbf850e542765b8", size = 6082160, upload-time = "2025-08-05T16:10:55.605Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ca/ff/ded57ac5ff40a09e6e198550bab075d780941e0b0f83cbeabd087c59383a/virtualenv-20.33.1-py3-none-any.whl", hash = "sha256:07c19bc66c11acab6a5958b815cbcee30891cd1c2ccf53785a28651a0d8d8a67", size = 6060362, upload-time = "2025-08-05T16:10:52.81Z" },
+]
+
+[[package]]
+name = "watchdog"
+version = "6.0.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0c/56/90994d789c61df619bfc5ce2ecdabd5eeff564e1eb47512bd01b5e019569/watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26", size = 96390, upload-time = "2024-11-01T14:06:24.793Z" },
+ { url = "https://files.pythonhosted.org/packages/55/46/9a67ee697342ddf3c6daa97e3a587a56d6c4052f881ed926a849fcf7371c/watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112", size = 88389, upload-time = "2024-11-01T14:06:27.112Z" },
+ { url = "https://files.pythonhosted.org/packages/44/65/91b0985747c52064d8701e1075eb96f8c40a79df889e59a399453adfb882/watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3", size = 89020, upload-time = "2024-11-01T14:06:29.876Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/24/d9be5cd6642a6aa68352ded4b4b10fb0d7889cb7f45814fb92cecd35f101/watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c", size = 96393, upload-time = "2024-11-01T14:06:31.756Z" },
+ { url = "https://files.pythonhosted.org/packages/63/7a/6013b0d8dbc56adca7fdd4f0beed381c59f6752341b12fa0886fa7afc78b/watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2", size = 88392, upload-time = "2024-11-01T14:06:32.99Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/40/b75381494851556de56281e053700e46bff5b37bf4c7267e858640af5a7f/watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c", size = 89019, upload-time = "2024-11-01T14:06:34.963Z" },
+ { url = "https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" },
+ { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" },
+ { url = "https://files.pythonhosted.org/packages/68/98/b0345cabdce2041a01293ba483333582891a3bd5769b08eceb0d406056ef/watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c", size = 96480, upload-time = "2024-11-01T14:06:42.952Z" },
+ { url = "https://files.pythonhosted.org/packages/85/83/cdf13902c626b28eedef7ec4f10745c52aad8a8fe7eb04ed7b1f111ca20e/watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134", size = 88451, upload-time = "2024-11-01T14:06:45.084Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/c4/225c87bae08c8b9ec99030cd48ae9c4eca050a59bf5c2255853e18c87b50/watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b", size = 89057, upload-time = "2024-11-01T14:06:47.324Z" },
+ { url = "https://files.pythonhosted.org/packages/30/ad/d17b5d42e28a8b91f8ed01cb949da092827afb9995d4559fd448d0472763/watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881", size = 87902, upload-time = "2024-11-01T14:06:53.119Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/ca/c3649991d140ff6ab67bfc85ab42b165ead119c9e12211e08089d763ece5/watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11", size = 88380, upload-time = "2024-11-01T14:06:55.19Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" },
+ { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" },
+ { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" },
+ { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" },
+ { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" },
+]
+
+[[package]]
+name = "yarl"
+version = "1.20.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "idna" },
+ { name = "multidict" },
+ { name = "propcache" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cb/65/7fed0d774abf47487c64be14e9223749468922817b5e8792b8a64792a1bb/yarl-1.20.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:6032e6da6abd41e4acda34d75a816012717000fa6839f37124a47fcefc49bec4", size = 132910, upload-time = "2025-06-10T00:42:31.108Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/7b/988f55a52da99df9e56dc733b8e4e5a6ae2090081dc2754fc8fd34e60aa0/yarl-1.20.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2c7b34d804b8cf9b214f05015c4fee2ebe7ed05cf581e7192c06555c71f4446a", size = 90644, upload-time = "2025-06-10T00:42:33.851Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/de/30d98f03e95d30c7e3cc093759982d038c8833ec2451001d45ef4854edc1/yarl-1.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c869f2651cc77465f6cd01d938d91a11d9ea5d798738c1dc077f3de0b5e5fed", size = 89322, upload-time = "2025-06-10T00:42:35.688Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/7a/f2f314f5ebfe9200724b0b748de2186b927acb334cf964fd312eb86fc286/yarl-1.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62915e6688eb4d180d93840cda4110995ad50c459bf931b8b3775b37c264af1e", size = 323786, upload-time = "2025-06-10T00:42:37.817Z" },
+ { url = "https://files.pythonhosted.org/packages/15/3f/718d26f189db96d993d14b984ce91de52e76309d0fd1d4296f34039856aa/yarl-1.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:41ebd28167bc6af8abb97fec1a399f412eec5fd61a3ccbe2305a18b84fb4ca73", size = 319627, upload-time = "2025-06-10T00:42:39.937Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/76/8fcfbf5fa2369157b9898962a4a7d96764b287b085b5b3d9ffae69cdefd1/yarl-1.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:21242b4288a6d56f04ea193adde174b7e347ac46ce6bc84989ff7c1b1ecea84e", size = 339149, upload-time = "2025-06-10T00:42:42.627Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/95/d7fc301cc4661785967acc04f54a4a42d5124905e27db27bb578aac49b5c/yarl-1.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bea21cdae6c7eb02ba02a475f37463abfe0a01f5d7200121b03e605d6a0439f8", size = 333327, upload-time = "2025-06-10T00:42:44.842Z" },
+ { url = "https://files.pythonhosted.org/packages/65/94/e21269718349582eee81efc5c1c08ee71c816bfc1585b77d0ec3f58089eb/yarl-1.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f8a891e4a22a89f5dde7862994485e19db246b70bb288d3ce73a34422e55b23", size = 326054, upload-time = "2025-06-10T00:42:47.149Z" },
+ { url = "https://files.pythonhosted.org/packages/32/ae/8616d1f07853704523519f6131d21f092e567c5af93de7e3e94b38d7f065/yarl-1.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd803820d44c8853a109a34e3660e5a61beae12970da479cf44aa2954019bf70", size = 315035, upload-time = "2025-06-10T00:42:48.852Z" },
+ { url = "https://files.pythonhosted.org/packages/48/aa/0ace06280861ef055855333707db5e49c6e3a08840a7ce62682259d0a6c0/yarl-1.20.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b982fa7f74c80d5c0c7b5b38f908971e513380a10fecea528091405f519b9ebb", size = 338962, upload-time = "2025-06-10T00:42:51.024Z" },
+ { url = "https://files.pythonhosted.org/packages/20/52/1e9d0e6916f45a8fb50e6844f01cb34692455f1acd548606cbda8134cd1e/yarl-1.20.1-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:33f29ecfe0330c570d997bcf1afd304377f2e48f61447f37e846a6058a4d33b2", size = 335399, upload-time = "2025-06-10T00:42:53.007Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/65/60452df742952c630e82f394cd409de10610481d9043aa14c61bf846b7b1/yarl-1.20.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:835ab2cfc74d5eb4a6a528c57f05688099da41cf4957cf08cad38647e4a83b30", size = 338649, upload-time = "2025-06-10T00:42:54.964Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/f5/6cd4ff38dcde57a70f23719a838665ee17079640c77087404c3d34da6727/yarl-1.20.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46b5e0ccf1943a9a6e766b2c2b8c732c55b34e28be57d8daa2b3c1d1d4009309", size = 358563, upload-time = "2025-06-10T00:42:57.28Z" },
+ { url = "https://files.pythonhosted.org/packages/d1/90/c42eefd79d0d8222cb3227bdd51b640c0c1d0aa33fe4cc86c36eccba77d3/yarl-1.20.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:df47c55f7d74127d1b11251fe6397d84afdde0d53b90bedb46a23c0e534f9d24", size = 357609, upload-time = "2025-06-10T00:42:59.055Z" },
+ { url = "https://files.pythonhosted.org/packages/03/c8/cea6b232cb4617514232e0f8a718153a95b5d82b5290711b201545825532/yarl-1.20.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:76d12524d05841276b0e22573f28d5fbcb67589836772ae9244d90dd7d66aa13", size = 350224, upload-time = "2025-06-10T00:43:01.248Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/a3/eaa0ab9712f1f3d01faf43cf6f1f7210ce4ea4a7e9b28b489a2261ca8db9/yarl-1.20.1-cp310-cp310-win32.whl", hash = "sha256:6c4fbf6b02d70e512d7ade4b1f998f237137f1417ab07ec06358ea04f69134f8", size = 81753, upload-time = "2025-06-10T00:43:03.486Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/34/e4abde70a9256465fe31c88ed02c3f8502b7b5dead693a4f350a06413f28/yarl-1.20.1-cp310-cp310-win_amd64.whl", hash = "sha256:aef6c4d69554d44b7f9d923245f8ad9a707d971e6209d51279196d8e8fe1ae16", size = 86817, upload-time = "2025-06-10T00:43:05.231Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/18/893b50efc2350e47a874c5c2d67e55a0ea5df91186b2a6f5ac52eff887cd/yarl-1.20.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:47ee6188fea634bdfaeb2cc420f5b3b17332e6225ce88149a17c413c77ff269e", size = 133833, upload-time = "2025-06-10T00:43:07.393Z" },
+ { url = "https://files.pythonhosted.org/packages/89/ed/b8773448030e6fc47fa797f099ab9eab151a43a25717f9ac043844ad5ea3/yarl-1.20.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0f6500f69e8402d513e5eedb77a4e1818691e8f45e6b687147963514d84b44b", size = 91070, upload-time = "2025-06-10T00:43:09.538Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/e3/409bd17b1e42619bf69f60e4f031ce1ccb29bd7380117a55529e76933464/yarl-1.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7a8900a42fcdaad568de58887c7b2f602962356908eedb7628eaf6021a6e435b", size = 89818, upload-time = "2025-06-10T00:43:11.575Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/77/64d8431a4d77c856eb2d82aa3de2ad6741365245a29b3a9543cd598ed8c5/yarl-1.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bad6d131fda8ef508b36be3ece16d0902e80b88ea7200f030a0f6c11d9e508d4", size = 347003, upload-time = "2025-06-10T00:43:14.088Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/d2/0c7e4def093dcef0bd9fa22d4d24b023788b0a33b8d0088b51aa51e21e99/yarl-1.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:df018d92fe22aaebb679a7f89fe0c0f368ec497e3dda6cb81a567610f04501f1", size = 336537, upload-time = "2025-06-10T00:43:16.431Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/f3/fc514f4b2cf02cb59d10cbfe228691d25929ce8f72a38db07d3febc3f706/yarl-1.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f969afbb0a9b63c18d0feecf0db09d164b7a44a053e78a7d05f5df163e43833", size = 362358, upload-time = "2025-06-10T00:43:18.704Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/6d/a313ac8d8391381ff9006ac05f1d4331cee3b1efaa833a53d12253733255/yarl-1.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:812303eb4aa98e302886ccda58d6b099e3576b1b9276161469c25803a8db277d", size = 357362, upload-time = "2025-06-10T00:43:20.888Z" },
+ { url = "https://files.pythonhosted.org/packages/00/70/8f78a95d6935a70263d46caa3dd18e1f223cf2f2ff2037baa01a22bc5b22/yarl-1.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98c4a7d166635147924aa0bf9bfe8d8abad6fffa6102de9c99ea04a1376f91e8", size = 348979, upload-time = "2025-06-10T00:43:23.169Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/05/42773027968968f4f15143553970ee36ead27038d627f457cc44bbbeecf3/yarl-1.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12e768f966538e81e6e7550f9086a6236b16e26cd964cf4df35349970f3551cf", size = 337274, upload-time = "2025-06-10T00:43:27.111Z" },
+ { url = "https://files.pythonhosted.org/packages/05/be/665634aa196954156741ea591d2f946f1b78ceee8bb8f28488bf28c0dd62/yarl-1.20.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe41919b9d899661c5c28a8b4b0acf704510b88f27f0934ac7a7bebdd8938d5e", size = 363294, upload-time = "2025-06-10T00:43:28.96Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/90/73448401d36fa4e210ece5579895731f190d5119c4b66b43b52182e88cd5/yarl-1.20.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:8601bc010d1d7780592f3fc1bdc6c72e2b6466ea34569778422943e1a1f3c389", size = 358169, upload-time = "2025-06-10T00:43:30.701Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/b0/fce922d46dc1eb43c811f1889f7daa6001b27a4005587e94878570300881/yarl-1.20.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:daadbdc1f2a9033a2399c42646fbd46da7992e868a5fe9513860122d7fe7a73f", size = 362776, upload-time = "2025-06-10T00:43:32.51Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/0d/b172628fce039dae8977fd22caeff3eeebffd52e86060413f5673767c427/yarl-1.20.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:03aa1e041727cb438ca762628109ef1333498b122e4c76dd858d186a37cec845", size = 381341, upload-time = "2025-06-10T00:43:34.543Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/9b/5b886d7671f4580209e855974fe1cecec409aa4a89ea58b8f0560dc529b1/yarl-1.20.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:642980ef5e0fa1de5fa96d905c7e00cb2c47cb468bfcac5a18c58e27dbf8d8d1", size = 379988, upload-time = "2025-06-10T00:43:36.489Z" },
+ { url = "https://files.pythonhosted.org/packages/73/be/75ef5fd0fcd8f083a5d13f78fd3f009528132a1f2a1d7c925c39fa20aa79/yarl-1.20.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:86971e2795584fe8c002356d3b97ef6c61862720eeff03db2a7c86b678d85b3e", size = 371113, upload-time = "2025-06-10T00:43:38.592Z" },
+ { url = "https://files.pythonhosted.org/packages/50/4f/62faab3b479dfdcb741fe9e3f0323e2a7d5cd1ab2edc73221d57ad4834b2/yarl-1.20.1-cp311-cp311-win32.whl", hash = "sha256:597f40615b8d25812f14562699e287f0dcc035d25eb74da72cae043bb884d773", size = 81485, upload-time = "2025-06-10T00:43:41.038Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/09/d9c7942f8f05c32ec72cd5c8e041c8b29b5807328b68b4801ff2511d4d5e/yarl-1.20.1-cp311-cp311-win_amd64.whl", hash = "sha256:26ef53a9e726e61e9cd1cda6b478f17e350fb5800b4bd1cd9fe81c4d91cfeb2e", size = 86686, upload-time = "2025-06-10T00:43:42.692Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" },
+ { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" },
+ { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" },
+ { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" },
+ { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" },
+ { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" },
+ { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" },
+ { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" },
+ { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" },
+ { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" },
+ { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" },
+ { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" },
+ { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" },
+ { url = "https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" },
+ { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" },
+ { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" },
+ { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" },
+ { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" },
+ { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" },
+ { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" },
+ { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" },
+ { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" },
+ { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" },
+ { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" },
+ { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" },
+ { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, upload-time = "2025-06-10T00:45:16.479Z" },
+ { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" },
+ { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" },
+ { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" },
+]