diff --git a/.dockerignore b/.dockerignore index ff542b8..cb66c2e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -69,6 +69,11 @@ coverage.xml .mypy_cache/ .ruff_cache/ +# Build-context trimming (not needed inside the image) +artifacts/ +quant-strategies/ +tests/ + # Temporary files *.tmp *.temp diff --git a/.env.example b/.env.example index e8d9327..934d34e 100644 --- a/.env.example +++ b/.env.example @@ -62,6 +62,11 @@ PGADMIN_DEFAULT_EMAIL=admin@quant.local PGADMIN_DEFAULT_PASSWORD=quantpass PGADMIN_CONFIG_SERVER_MODE=False +# Optional Redis for recent overlay cache +# To enable Redis service by default in Compose: export COMPOSE_PROFILES=redis +USE_REDIS_RECENT=false +REDIS_URL=redis://redis:6379/0 + # Cache directory (for temporary files) CACHE_DIR=./cache @@ -79,15 +84,3 @@ OPENAI_MODEL=gpt-5-mini # Get API key at: https://console.anthropic.com/ ANTHROPIC_API_KEY=your_anthropic_api_key_here ANTHROPIC_MODEL=claude-3-5-sonnet-20241022 - -# ====================== -# CONFIGURATION PREFIX -# ====================== -# All config environment variables use QUANTPY_ prefix -# Example: QUANTPY_DATA_DEFAULT_INTERVAL=1h -# Example: QUANTPY_BACKTEST_INITIAL_CAPITAL=100000 - -# Common overrides: -# QUANTPY_DATA_DEFAULT_INTERVAL=1h -# QUANTPY_BACKTEST_INITIAL_CAPITAL=100000 -# QUANTPY_BACKTEST_DEFAULT_COMMISSION=0.001 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b8d07a3..6e8df41 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,8 @@ name: CI +permissions: + contents: read + on: push: branches: [ main ] @@ -10,6 +13,10 @@ jobs: ci: name: Code Quality & Tests runs-on: ubuntu-latest + env: + UNIFIED_MODELS_SQLITE: "1" # Force SQLite for unified models during CI + TESTING: "true" # Enable test-mode code paths + DATABASE_URL: "sqlite:///quant_unified_test.db" # Safety: default DB points to SQLite steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml 
index 2302b38..27a3ce0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,5 +1,8 @@ name: Release +permissions: + contents: read + on: push: tags: diff --git a/.gitignore b/.gitignore index 9b8f1b2..489e691 100644 --- a/.gitignore +++ b/.gitignore @@ -37,6 +37,7 @@ exports/* !exports/**/.gitkeep dist/ build/ +artifacts/ *.tar.gz *.whl @@ -60,6 +61,11 @@ lean_config.json *.iml *.iws +# pyenv / direnv +.python-version +.direnv/ +.envrc + # Testing and Type Checking .coverage .coverage.* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..c89996f --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,32 @@ +# Contributing + +Thanks for your interest in contributing to quant-system! + +## License and Use + +This repository is released under the Business Source License 1.1 (BUSL‑1.1). +Commercial use is restricted until the Change Date listed in `LICENSE`. On that +date, the project will convert to the MIT License. + +## Deprecations and Import Paths + +We recently renamed modules: + +- `src.core.portfolio_manager` → `src.core.collection_manager` (class: `PortfolioManager`) +- `src.utils.tradingview_alert_exporter` → `src.utils.tv_alert_exporter` + +Compatibility shims exist for now and will emit `DeprecationWarning`. Please +update imports to the new modules. The shims are scheduled for removal after +the next minor release. + +## Development + +- Use `docker compose` and the unified CLI. See `README.md` and `docs/docker.md`. +- Run `pre-commit` locally: `pre-commit install && pre-commit run -a`. +- Tests run inside Docker via the pre-commit hook. + +## Pull Requests + +- Keep PRs focused and small. +- Include tests for behavior changes. +- Pass pre-commit hooks (format, lint, tests). 
diff --git a/DOCKERFILE b/DOCKERFILE index e643b03..1c810d4 100644 --- a/DOCKERFILE +++ b/DOCKERFILE @@ -1,5 +1,5 @@ # Multi-stage build for optimized production image -FROM python:3.12-slim as base +FROM python:3.12-slim AS base # Set environment variables ENV PYTHONUNBUFFERED=1 \ @@ -35,7 +35,7 @@ RUN poetry config virtualenvs.create false \ && rm -rf $POETRY_CACHE_DIR # Production stage -FROM python:3.12-slim as production +FROM python:3.12-slim AS production # Set environment variables ENV PYTHONUNBUFFERED=1 \ @@ -78,7 +78,7 @@ HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \ CMD ["python", "-m", "src.cli.unified_cli", "--help"] # Development stage -FROM base as development +FROM base AS development # Install development dependencies RUN poetry install --no-root @@ -96,7 +96,7 @@ ENV ENVIRONMENT=development CMD ["bash"] # Testing stage -FROM development as testing +FROM development AS testing # Install test dependencies RUN poetry install @@ -109,7 +109,7 @@ COPY pytest.ini ./ CMD ["poetry", "run", "pytest", "tests/", "-v"] # Jupyter stage for data analysis -FROM development as jupyter +FROM development AS jupyter # Install Jupyter and additional analysis tools RUN poetry add jupyter jupyterlab plotly seaborn @@ -128,7 +128,7 @@ RUN mkdir -p /app/.jupyter && \ CMD ["jupyter", "lab", "--allow-root", "--config=/app/.jupyter/jupyter_notebook_config.py"] # API stage for web services -FROM production as api +FROM production AS api # Expose API port EXPOSE 8000 diff --git a/LICENSE b/LICENSE index 100e021..0aeeb9d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,34 @@ -MIT License - -Copyright (c) 2024 Louis Letcher - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the 
Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +Business Source License 1.1 + +Licensor: Louis Letcher + +Licensed Work: quant-system +The Licensed Work is the repository at the following URL: +https://github.com/LouisLetcher/quant-system (or its successors) + +Additional Use Grant: You may use, copy, modify, create derivative works, and +redistribute the Licensed Work, for non-commercial purposes only. + +Change Date: 2028-01-01 + +Change License: On the Change Date, the Licensor will make the Licensed Work +available under the MIT License. + +Terms + +The Licensor permits you to use, copy, modify, create derivative works, and +redistribute the Licensed Work; provided, however, that any use is for +non-commercial purposes only. Any commercial use of the Licensed Work is +prohibited, except with the Licensor's prior written approval. + +This License does not grant you any trademark rights for the Licensor’s marks. + +To the extent permitted by applicable law, the Licensed Work is provided "as is" +and the Licensor disclaims all warranties and conditions, whether express or +implied, including but not limited to implied warranties of merchantability, +fitness for a particular purpose, title, and non-infringement. + +Any copy of the Licensed Work you make must include this License. 
+ +For more information about the Business Source License, please see +https://mariadb.com/bsl11/ diff --git a/README.md b/README.md index 3fc8fa6..ced2aba 100644 --- a/README.md +++ b/README.md @@ -1,308 +1,229 @@ # Quant System -A comprehensive quantitative backtesting system built for institutional-grade collection analysis. Docker-based setup with production-grade features for analyzing stocks, bonds, crypto, forex, and commodities across global markets. +A unified, Dockerized quantitative backtesting and reporting system. Run cross‑strategy comparisons for asset collections (e.g., bonds) and persist results to PostgreSQL with exportable artifacts. ## 🚀 Quick Start -### Docker Setup (Only Way) +### Docker Setup ```bash # Clone repository git clone cd quant-system -# Start PostgreSQL database and services -docker-compose up -d postgres pgadmin +# Start PostgreSQL and pgAdmin +docker compose up -d postgres pgadmin -# Build and run main system -docker-compose build quant -docker-compose run --rm quant python -m src.cli.unified_cli --help +# Build the app image (uses DOCKERFILE) +docker compose build quant -# Run comprehensive collection backtesting -docker-compose run --rm quant python -m src.cli.unified_cli portfolio test-all \ - --portfolio config/collections/bonds.json \ - --metric sortino_ratio \ - --period max +# Show CLI help +docker compose run --rm quant python -m src.cli.unified_cli --help -# Interactive shell -docker-compose run --rm quant bash -``` - -## 📊 Features - -### Core Capabilities -- **Multi-Asset Support**: Stocks, bonds, crypto, forex, commodities via multiple data sources -- **AI Investment Recommendations**: Performance-based portfolio optimization with confidence scoring -- **Backtesting Library Integration**: Direct integration with `backtesting` library for institutional-grade performance analysis -- **Portfolio Analysis**: Risk-adjusted returns, correlation analysis, drawdown attribution -- **Data Integration**: PostgreSQL storage with 
Yahoo Finance, Bybit, Alpha Vantage APIs -- **Report Generation**: Automated quarterly HTML reports, CSV exports, TradingView alerts - -### Data Sources by Asset Class -- **Stocks/Bonds**: Yahoo Finance (primary), Alpha Vantage (fallback) -- **Crypto**: Bybit (primary), Yahoo Finance (fallback) -- **Forex**: Alpha Vantage, Twelve Data, Polygon.io -- **Commodities**: Yahoo Finance, Tiingo - -## 🏗️ Architecture - -``` -quant-system/ -├── src/ # Core source code -│ ├── core/ # Trading logic & backtesting -│ ├── cli/ # Command-line interface -│ └── utils/ # Utilities & data management -├── config/collections/ # Asset collections (stocks, bonds, crypto, forex) -├── exports/ # Organized exports (reports/alerts by quarter) -├── cache/ # Data cache (Docker mounted) -└── logs/ # System logs (Docker mounted) +# Interactive shell inside the app container +docker compose run --rm quant bash ``` ## 📈 Usage -### Portfolio Management -```bash -# Comprehensive collection testing (generates HTML reports + database data) -docker-compose run --rm quant python -m src.cli.unified_cli portfolio test-all \ - --portfolio config/collections/bonds.json \ - --metric sortino_ratio \ - --period max - -# Single symbol backtest -docker-compose run --rm quant python -m src.cli.unified_cli portfolio backtest \ - --symbols TLT IEF SHY \ - --strategy BuyAndHold \ - --start-date 2023-01-01 \ - --end-date 2024-12-31 - -# Compare multiple portfolios -docker-compose run --rm quant python -m src.cli.unified_cli portfolio compare \ - config/collections/stocks_traderfox_us_tech.json config/collections/bonds.json - -# Generate investment plan based on backtest results -docker-compose run --rm quant python -m src.cli.unified_cli portfolio plan \ - --portfolio config/collections/bonds.json -``` +See also: docs/pgadmin-and-performance.md for DB inspection and performance tips. 
-### AI Investment Recommendations -```bash -# Generate AI portfolio recommendations (creates markdown + HTML reports) -docker-compose run --rm quant python -m src.cli.unified_cli ai portfolio_recommend \ - --portfolio config/collections/bonds.json \ - --risk-tolerance moderate - -# Get specific recommendations by quarter -docker-compose run --rm quant python -m src.cli.unified_cli ai recommend \ - --quarter Q3_2025 --risk-tolerance aggressive - -# Explain asset recommendations -docker-compose run --rm quant python -m src.cli.unified_cli ai explain \ - --symbol TLT --timeframe 1d -``` - -### Data Management -```bash -# Download market data for collections -docker-compose run --rm quant python -m src.cli.unified_cli data download \ - --symbols TLT IEF SHY --asset-type bonds +The unified CLI currently exposes a single subcommand: `collection`. -# Show available data sources -docker-compose run --rm quant python -m src.cli.unified_cli data sources +### Run Bonds (1d interval, max period, all strategies) -# List symbols by asset type -docker-compose run --rm quant python -m src.cli.unified_cli data symbols --asset-type bonds +Use the collection key (`bonds`) or the JSON file path. The `direct` action runs the backtests and writes results to the DB. Add `--exports all` to generate CSV/HTML/TV/AI artifacts when possible. 
-# Cache management -docker-compose run --rm quant python -m src.cli.unified_cli cache stats -docker-compose run --rm quant python -m src.cli.unified_cli cache clear --older-than-days 30 -``` - -### Strategy Development ```bash -# List available strategies -docker-compose run --rm quant python -m src.cli.unified_cli strategy list +# Using the collection key (recommended) +docker compose run --rm \ + -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --action direct \ + --interval 1d \ + --period max \ + --strategies all \ + --exports all \ + --log-level INFO + +# Using the JSON file +docker compose run --rm \ + -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection config/collections/bonds.json \ + --action direct \ + --interval 1d \ + --period max \ + --strategies all \ + --exports all \ + --log-level INFO +``` + +Notes + +- Default metric is `sortino_ratio`. +- Strategies are mounted at `/app/external_strategies` via `docker-compose.yml`; `STRATEGIES_PATH` makes discovery explicit. +- Artifacts are written under `artifacts/run_*`. DB tables used include `runs`, `backtest_results`, `best_strategies`, and `run_artifacts`. +- pgAdmin is available at `http://localhost:5050` (defaults configured via `.env`/`.env.example`). 
+ +### Dry Run (plan only + optional exports) -# Get strategy details -docker-compose run --rm quant python -m src.cli.unified_cli strategy info --strategy BuyAndHold - -# Test custom strategy -docker-compose run --rm quant python -m src.cli.unified_cli strategy test \ - --strategy-file external_strategies/my_strategy.py -``` - -### Optimization ```bash -# Optimize single strategy parameters -docker-compose run --rm quant python -m src.cli.unified_cli optimize single \ - --symbol TLT --strategy RSI --method genetic --iterations 100 - -# Batch optimization across multiple symbols -docker-compose run --rm quant python -m src.cli.unified_cli optimize batch \ - --symbols TLT IEF SHY --strategies RSI BollingerBands --workers 4 +docker compose run --rm \ + -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --interval 1d --period max --strategies all \ + --dry-run --exports all --log-level DEBUG ``` -### Analysis & Reporting -```bash -# Generate comprehensive analysis reports -docker-compose run --rm quant python -m src.cli.unified_cli analyze report \ - --portfolio config/collections/bonds.json +### Other Actions -# Compare strategy performance -docker-compose run --rm quant python -m src.cli.unified_cli analyze compare \ - --symbols TLT IEF SHY --strategies BuyAndHold RSI -``` +The `collection` subcommand supports these `--action` values: `backtest`, `direct`, `optimization`, `export`, `report`, `tradingview`. In most workflows, use `--action direct` and optionally `--exports`. 
## 🔧 Configuration ### Environment Variables (.env) + ```bash -# PostgreSQL Database (primary storage) -DATABASE_URL=postgresql://quantuser:quantpass@localhost:5432/quant_system +# PostgreSQL (inside the container, use the service name 'postgres') +DATABASE_URL=postgresql://quantuser:quantpass@postgres:5432/quant_system -# Optional API keys for enhanced data access +# Optional data providers ALPHA_VANTAGE_API_KEY=your_key TWELVE_DATA_API_KEY=your_key POLYGON_API_KEY=your_key TIINGO_API_KEY=your_key FINNHUB_API_KEY=your_key -``` +BYBIT_API_KEY=your_key +BYBIT_API_SECRET=your_secret +BYBIT_TESTNET=false -### Collection Examples (config/collections/) - -#### Stocks Collection -```json -{ - "name": "US Large Cap Stocks", - "symbols": ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA"], - "data_sources": { - "primary": ["yahoo_finance"], - "fallback": ["alpha_vantage"] - } -} +# Optional LLMs +OPENAI_API_KEY=your_key +OPENAI_MODEL=gpt-4o +ANTHROPIC_API_KEY=your_key +ANTHROPIC_MODEL=claude-3-5-sonnet-20241022 ``` -#### Bonds Collection -```json -{ - "name": "US Treasury Bonds", - "symbols": ["TLT", "IEF", "SHY", "TIPS"], - "data_sources": { - "primary": ["yahoo_finance"], - "fallback": ["alpha_vantage"] - } -} -``` +Host access tips -#### Crypto Collection -```json -{ - "name": "Crypto Portfolio", - "symbols": ["BTCUSDT", "ETHUSDT", "SOLUSDT"], - "data_sources": { - "primary": ["bybit", "yahoo_finance"], - "fallback": ["alpha_vantage"] - } -} -``` +- Postgres is published on `localhost:5433` (mapped to container `5432`). +- pgAdmin runs at `http://localhost:5050` (see `.env` for credentials). 
-## 📊 Performance Metrics +### Collections -**Primary Metric: Sortino Ratio** (default) +Collections live under `config/collections/` and are split into: -**Why Sortino over Sharpe:** -- **Sortino** only penalizes **downside volatility** (what investors actually care about) -- **Sharpe** penalizes all volatility, including upside moves (which aren't really "risk") -- **Hedge funds prefer Sortino** because upside volatility is desirable +- `default/` (curated, liquid, fast to iterate) +- `custom/` (your own research sets) -**Metric Hierarchy for Quantitative Analysis:** -1. **Sortino Ratio** (primary) - Downside risk-adjusted returns -2. **Calmar Ratio** (secondary) - Annual return / Max drawdown -3. **Sharpe Ratio** (tertiary) - Traditional risk-adjusted returns -4. **Profit Factor** (supplementary) - Gross profit/loss ratio +Default examples: -**Additional Analysis:** -- **Drawdown Analysis**: Maximum drawdown, recovery periods -- **Volatility**: Standard deviation, downside deviation -- **Efficiency**: Win rate, risk-reward ratios +- Bonds: `default/bonds_core.json` (liquid bond ETFs), `default/bonds.json` (broader set) +- Commodities: `default/commodities_core.json` (gold/silver/energy/agriculture/broad) +- Crypto: `default/crypto_liquid.json` (top market-cap, USDT pairs) +- Forex: `default/forex_majors.json` (majors and key crosses; Yahoo Finance format `=X`) +- Indices: `default/indices_global_core.json` (SPY/QQQ/DIA/IWM/EFA/EEM/EWJ/FXI etc.) 
+- Stocks: `default/stocks_us_mega_core.json`, `default/stocks_us_growth_core.json` + - Factors: `default/stocks_us_value_core.json`, `default/stocks_us_quality_core.json`, `default/stocks_us_minvol_core.json` + - Global factors: `default/stocks_global_factor_core.json` + +Custom examples (research-driven): + +- `custom/stocks_traderfox_dax.json` +- `custom/stocks_traderfox_european.json` +- `custom/stocks_traderfox_us_financials.json` +- `custom/stocks_traderfox_us_healthcare.json` +- `custom/stocks_traderfox_us_tech.json` + +You can reference any collection by key without the folder prefix (resolver searches `default/` and `custom/`). For example, `bonds_core` resolves `config/collections/default/bonds_core.json`. ## 🧪 Testing ```bash # Run tests in Docker -docker-compose run --rm quant pytest +docker compose run --rm quant pytest ``` -## 📊 Export & Reporting +## 📊 Exports & Reporting + +Artifacts and exports are written under `artifacts/run_*` and `exports/`. When running with `--action direct` or `--dry-run`, pass `--exports csv,report,tradingview,ai` or `--exports all`. 
-### Export & Reporting ```bash -# Export collection-specific CSV data from database (quarterly summary) -docker-compose run --rm quant python -m src.cli.unified_cli reports export-csv \ - --portfolio config/collections/bonds.json --format quarterly \ - --quarter Q3 --year 2025 +# Produce exports from DB for bonds without re-running backtests +docker compose run --rm quant \ + python -m src.cli.unified_cli collection bonds --dry-run --exports all +``` -# Export best strategies by quarter for all collections -docker-compose run --rm quant python -m src.cli.unified_cli reports export-csv \ - --format best-strategies --quarter Q3 --year 2025 +Output locations and unified naming (`{Collection}_Collection_{Year}_{Quarter}_{Interval}`): +- CSV: `exports/csv/{Year}/{Quarter}/{Collection}_Collection_{Year}_{Quarter}_{Interval}.csv` +- HTML reports: `exports/reports/{Year}/{Quarter}/{Collection}_Collection_{Year}_{Quarter}_{Interval}.html` +- TradingView alerts (Markdown): `exports/tv_alerts/{Year}/{Quarter}/{Collection}_Collection_{Year}_{Quarter}_{Interval}.md` +- AI recommendations: + - Markdown: `exports/ai_reco/{Year}/{Quarter}/{Collection}_Collection_{Year}_{Quarter}_{Interval}.md` + - HTML (dark Tailwind): same path with `.html` and a Download CSV link -# Export TradingView alerts with proper naming convention -docker-compose run --rm quant python -m src.utils.tradingview_alert_exporter \ - --output bonds_collection_tradingview_alerts_Q3_2025 +Notes: +- Exporters are DB-backed (read best strategies); no HTML scraping. +- With multiple intervals in plan, filenames prefer `1d`. Pass `--interval 1d` to constrain both content and filenames. 
-# Generate AI investment recommendations (markdown format) -docker-compose run --rm quant python -m src.cli.unified_cli ai portfolio_recommend \ - --portfolio config/collections/bonds.json \ - --risk-tolerance moderate +## 🗄️ Data & Cache -# Organize reports by quarter/year -docker-compose run --rm quant python -m src.cli.unified_cli reports organize +- Split caching: the system maintains two layers for market data. + - Full snapshot: stored when requesting provider periods like `--period max` (long TTL). + - Recent overlay: normal runs cache the last ~90 days (short TTL). + - Reads merge both, prefer recent on overlap, and auto‑extend when a request exceeds cached range. +- Fresh fetch: add `--no-cache` (alias: `--fresh`) to bypass cache reads and fetch from the provider. The result still writes through to cache. +- Coverage probe: before backtests, the CLI samples a few symbols with `period=max` and prefers the source with the most rows and earliest start for this run. -# List available reports -docker-compose run --rm quant python -m src.cli.unified_cli reports list +### Prefetching Collections (avoid rate limits) -# Get latest report for portfolio -docker-compose run --rm quant python -m src.cli.unified_cli reports latest \ - --portfolio config/collections/bonds.json -``` +Use the prefetch script to refresh data on a schedule (e.g., nightly recent overlay and weekly full snapshot): -### Validation & Testing ```bash -# Validate strategy metrics against backtesting library -docker-compose run --rm quant python -m src.cli.unified_cli validate strategy \ - --symbol TLT --strategy BuyAndHold +# Full history snapshot (bonds) +docker compose run --rm quant \ + python scripts/prefetch_collection.py bonds --mode full --interval 1d -# Batch validate multiple strategies -docker-compose run --rm quant python -m src.cli.unified_cli validate batch \ - --symbols TLT IEF SHY --strategies BuyAndHold RSI +# Recent overlay (last 90 days) +docker compose run --rm quant \ + python 
scripts/prefetch_collection.py bonds --mode recent --interval 1d --recent-days 90 ``` -**TradingView Alert Format**: Includes strategy, timeframe, Sortino ratio, profit metrics, and placeholders like `{{close}}`, `{{timenow}}`, `{{strategy.order.action}}`. +Example cron (runs at 01:30 local time): -## 📁 Output & Storage +``` +30 1 * * * cd /path/to/quant-system && docker compose run --rm quant \ + python scripts/prefetch_collection.py bonds --mode recent --interval 1d --recent-days 90 >/dev/null 2>&1 +``` -**PostgreSQL Database (Primary Storage):** -- Market data with optimized indexes for Sortino analysis -- Backtest results with comprehensive performance metrics -- Portfolio configurations with Sortino-first optimization +### Optional Redis Overlay (advanced) -**Local Files (Organized by Quarter/Year):** -- `exports/reports/YYYY/QX/` - HTML portfolio reports by collection -- `exports/csv/YYYY/QX/` - CSV data exports by collection -- `exports/tradingview_alerts/YYYY/QX/` - TradingView alert exports -- `exports/recommendations/YYYY/QX/` - AI recommendation JSON exports -- `cache/` - Temporary files and quick access data -- `logs/` - System logs +- For higher throughput, you can use Redis for the “recent” layer and keep full snapshots on disk. +- Pros: very fast hot reads, simple TTL eviction. Cons: extra service; volatile if not persisted. +- Suggested setup: run Redis via compose, store recent overlay (last 90 days) with TTL ~24–48h; keep full history on disk (gzip). +- Current repo ships with file‑based caching; Redis is an optional enhancement and can be added without breaking existing flows. 
-## 🔒 Security +## 📚 Further Docs -- Environment variable-based API key management -- Docker containerization for isolation -- No external database dependencies for basic usage +- docs/pgadmin-and-performance.md — pgAdmin queries and performance tips +- docs/data-sources.md — supported providers and configuration +- docs/development.md — local dev, testing, and repo layout +- docs/docker.md — Docker specifics and mounts +- docs/features.md — feature overview and roadmap +- docs/cli-guide.md — CLI details and examples -## 📄 License +## 🛠️ Troubleshooting -MIT License - See [LICENSE](LICENSE) file for details. +- Command name: use `docker compose` (or legacy `docker-compose`) consistently. +- Subcommand: it is `collection` (singular), not `collections`. +- Strategy discovery: ensure strategies are mounted at `/app/external_strategies` and set `STRATEGIES_PATH=/app/external_strategies` when running. +- Database URL: inside containers use `postgres:5432` (`DATABASE_URL=postgresql://quantuser:quantpass@postgres:5432/quant_system`). On the host, Postgres is published at `localhost:5433`. +- Initialize tables: if tables are missing, run: + `docker compose run --rm quant python -c "from src.database.unified_models import create_tables; create_tables()"` +- Long runs/timeouts: backtests can take minutes to hours depending on strategies and symbols. Prefer `--log-level INFO` or `DEBUG` to monitor progress. Use `--dry-run` to validate plans quickly. Extra tips in docs/pgadmin-and-performance.md. +- Permissions/cache: ensure `cache/`, `exports/`, `logs/`, and `artifacts/` exist and are writable on the host (compose mounts them into the container). +- API limits: some data sources rate-limit; providing API keys in `.env` can reduce throttling. ---- +## ⚠️ Disclaimer -**⚠️ Disclaimer**: Educational purposes only. Not financial advice. Trade responsibly. +This project is for educational and research purposes only. It does not constitute financial advice. 
Use at your own risk and always perform your own due diligence before making investment decisions. diff --git a/alembic/versions/0001_unified_cli_schema.py b/alembic/versions/0001_unified_cli_schema.py new file mode 100644 index 0000000..135aa1b --- /dev/null +++ b/alembic/versions/0001_unified_cli_schema.py @@ -0,0 +1,153 @@ +"""create unified CLI schema (runs, backtest_results, trades, symbol_aggregates, run_artifacts) + +Revision ID: 0001_unified_cli_schema +Revises: +Create Date: 2025-08-27 07:09:00.000000 + +""" + +from __future__ import annotations + +import sqlalchemy as sa +import sqlalchemy.dialects.postgresql as pg + +from alembic import op + +# revision identifiers, used by Alembic. +revision = "0001_unified_cli_schema" +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # Runs table + op.create_table( + "runs", + sa.Column("run_id", sa.String(length=36), primary_key=True), + sa.Column( + "started_at_utc", + sa.DateTime(timezone=True), + nullable=False, + server_default=sa.text("now()"), + ), + sa.Column("finished_at_utc", sa.DateTime(timezone=True), nullable=True), + sa.Column("actor", sa.String(length=128), nullable=False), + sa.Column("action", sa.String(length=64), nullable=False), + sa.Column("collection_ref", sa.Text(), nullable=False), + sa.Column("strategies_mode", sa.String(length=256), nullable=False), + sa.Column("intervals_mode", sa.String(length=256), nullable=False), + sa.Column("target_metric", sa.String(length=64), nullable=False), + sa.Column("period_mode", sa.String(length=64), nullable=False), + sa.Column("args_json", pg.JSONB(astext_type=sa.Text()), nullable=False), + sa.Column("git_sha_app", sa.String(length=64), nullable=True), + sa.Column("git_sha_strat", sa.String(length=64), nullable=True), + sa.Column("data_source", sa.String(length=128), nullable=True), + sa.Column("plan_hash", sa.String(length=128), nullable=False, unique=True), + sa.Column( + "status", sa.String(length=32), 
nullable=False, server_default="running" + ), + sa.Column("error_summary", sa.Text(), nullable=True), + ) + + # Backtest results + op.create_table( + "backtest_results", + sa.Column("result_id", sa.String(length=36), primary_key=True), + sa.Column( + "run_id", + sa.String(length=36), + sa.ForeignKey("runs.run_id", ondelete="CASCADE"), + nullable=False, + index=True, + ), + sa.Column("symbol", sa.String(length=64), nullable=False, index=True), + sa.Column("strategy", sa.String(length=256), nullable=False, index=True), + sa.Column("interval", sa.String(length=32), nullable=False, index=True), + sa.Column("start_at_utc", sa.DateTime(timezone=True), nullable=True), + sa.Column("end_at_utc", sa.DateTime(timezone=True), nullable=True), + sa.Column("rank_in_symbol", sa.Integer(), nullable=True), + sa.Column("metrics", pg.JSONB(astext_type=sa.Text()), nullable=False), + sa.Column("engine_ctx", pg.JSONB(astext_type=sa.Text()), nullable=True), + sa.Column("trades_raw", sa.Text(), nullable=True), + sa.Column("error", sa.Text(), nullable=True), + sa.UniqueConstraint( + "run_id", + "symbol", + "strategy", + "interval", + name="uq_run_symbol_strategy_interval", + ), + ) + + # Trades + op.create_table( + "trades", + sa.Column("trade_id", sa.String(length=36), primary_key=True), + sa.Column( + "result_id", + sa.String(length=36), + sa.ForeignKey("backtest_results.result_id", ondelete="CASCADE"), + nullable=False, + index=True, + ), + sa.Column("trade_index", sa.Integer(), nullable=False), + sa.Column("size", sa.String(length=64), nullable=True), + sa.Column("entry_bar", sa.BigInteger(), nullable=True), + sa.Column("exit_bar", sa.BigInteger(), nullable=True), + sa.Column("entry_price", sa.String(length=64), nullable=True), + sa.Column("exit_price", sa.String(length=64), nullable=True), + sa.Column("pnl", sa.String(length=64), nullable=True), + sa.Column("duration", sa.Interval(), nullable=True), + sa.Column("tag", sa.String(length=128), nullable=True), + sa.Column("entry_signals", 
sa.Text(), nullable=True), + sa.Column("exit_signals", sa.Text(), nullable=True), + sa.UniqueConstraint("result_id", "trade_index", name="uq_result_trade_index"), + ) + + # Symbol aggregates + op.create_table( + "symbol_aggregates", + sa.Column("id", sa.String(length=36), primary_key=True), + sa.Column( + "run_id", + sa.String(length=36), + sa.ForeignKey("runs.run_id", ondelete="CASCADE"), + nullable=False, + index=True, + ), + sa.Column("symbol", sa.String(length=64), nullable=False), + sa.Column("best_by", sa.String(length=64), nullable=False), + sa.Column( + "best_result", + sa.String(length=36), + sa.ForeignKey("backtest_results.result_id", ondelete="CASCADE"), + nullable=False, + ), + sa.Column("summary", pg.JSONB(astext_type=sa.Text()), nullable=False), + sa.UniqueConstraint("run_id", "symbol", "best_by", name="uq_run_symbol_bestby"), + ) + + # Run artifacts + op.create_table( + "run_artifacts", + sa.Column("artifact_id", sa.String(length=36), primary_key=True), + sa.Column( + "run_id", + sa.String(length=36), + sa.ForeignKey("runs.run_id", ondelete="CASCADE"), + nullable=False, + index=True, + ), + sa.Column("artifact_type", sa.String(length=64), nullable=False), + sa.Column("path_or_uri", sa.Text(), nullable=False), + sa.Column("meta", pg.JSONB(astext_type=sa.Text()), nullable=True), + ) + + +def downgrade() -> None: + op.drop_table("run_artifacts") + op.drop_table("symbol_aggregates") + op.drop_table("trades") + op.drop_table("backtest_results") + op.drop_table("runs") diff --git a/config/collections/bonds.json b/config/collections/bonds.json deleted file mode 100644 index 2065bdd..0000000 --- a/config/collections/bonds.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "bonds": { - "name": "Bonds Collection", - "asset_type": "bond", - "symbols": [ - "TLT", - "IEF", - "SHY", - "LQD", - "HYG", - "EMB", - "TIP", - "VTEB", - "AGG", - "BND", - "VGIT", - "VCIT", - "GOVT", - "SCHO", - "IEI", - "SCHZ", - "BWX", - "IGOV", - "WIP", - "MUB", - "VMBS", - "VCSH", - "VCLT", - 
"VGSH", - "VGLT", - "EDV", - "ZROZ", - "FLOT", - "NEAR", - "JPST" - ], - "initial_capital": 10000, - "commission": 0.001 - } -} diff --git a/config/collections/commodities.json b/config/collections/commodities.json deleted file mode 100644 index cf757f0..0000000 --- a/config/collections/commodities.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "commodities": { - "name": "Commodities Collection", - "asset_type": "commodity", - "symbols": [ - "XAUUSD", - "XAGUSD", - "XPTUSD", - "XPDUSD", - "USOIL", - "UKOIL", - "NGAS", - "COPPER", - "WHEAT", - "CORN", - "SOYBEAN", - "SUGAR", - "COFFEE", - "COCOA", - "COTTON", - "RICE", - "OATS", - "CATTLE", - "HOGS", - "ALUMINUM", - "NICKEL", - "ZINC", - "LEAD", - "TIN", - "URANIUM", - "LUMBER", - "RUBBER", - "ORANGE_JUICE", - "MILK", - "CHEESE", - "BUTTER", - "FEEDER_CATTLE", - "LEAN_HOGS", - "ROUGH_RICE", - "CANOLA", - "PALM_OIL", - "GASOLINE", - "HEATING_OIL", - "ETHANOL", - "COAL", - "IRON_ORE", - "STEEL", - "RHODIUM", - "IRIDIUM", - "OSMIUM", - "RUTHENIUM" - ], - "initial_capital": 10000, - "commission": 0.001 - } -} diff --git a/config/collections/crypto.json b/config/collections/crypto.json deleted file mode 100644 index 648292d..0000000 --- a/config/collections/crypto.json +++ /dev/null @@ -1,443 +0,0 @@ -{ - "crypto": { - "name": "Crypto Collection", - "asset_type": "crypto", - "symbols": [ - "1CATUSDT", - "1INCHUSDT", - "A8USDT", - "AAVEUSDT", - "ACEUSDT", - "ACHUSDT", - "ADAUSD", - "ADAUSDT", - "AERGOUSDT", - "AEROUSDT", - "AEVOUSDT", - "AGIUSDT", - "AGIXUSDT", - "AGLDUSDT", - "AIDOGEUSDT", - "AIOZUSDT", - "AIUSDT", - "AKROUSDT", - "AKTUSDT", - "ALGOUSDT", - "ALICEUSDT", - "ALPACAUSDT", - "ALPHAUSDT", - "ALTUSDT", - "AMBUSDT", - "ANKRUSDT", - "ANTUSDT", - "APEUSDT", - "API3USDT", - "APTUSDT", - "APUUSDT", - "ARBUSDT", - "ARKMUSDT", - "ARKUSDT", - "ARPAUSDT", - "ARUSDT", - "ASTRUSDT", - "ATAUSDT", - "ATHUSDT", - "ATOMUSDT", - "AUCTIONUSDT", - "AUDIOUSDT", - "AVAILUSDT", - "AVAXUSDT", - "AXLUSDT", - "AXSUSDT", - "BABYDOGEUSDT", - 
"BADGERUSDT", - "BAKEUSDT", - "BALUSDT", - "BANANAUSDT", - "BANDUSDT", - "BATUSDT", - "BBUSDT", - "BCHUSDT", - "BEAMUSDT", - "BEERUSDT", - "BELUSDT", - "BENDOGUSDT", - "BICOUSDT", - "BIGTIMEUSDT", - "BLASTUSDT", - "BLURUSDT", - "BLZUSDT", - "BNBUSDT", - "BNTUSDT", - "BNXUSDT", - "BOBAUSDT", - "BOMEUSDT", - "BONDUSDT", - "BONKUSDT", - "BRETTUSDT", - "BSVUSDT", - "BSWUSDT", - "BTCUSD", - "BTCUSDT", - "BTTUSDT", - "BUSDUSDT", - "C98USDT", - "CAKEUSDT", - "CEEKUSDT", - "CELOUSDT", - "CELRUSDT", - "CETUSUSDT", - "CFXUSDT", - "CHRUSDT", - "CHZUSDT", - "CKBUSDT", - "CLOUDUSDT", - "COMBOUSDT", - "COMPUSDT", - "COQUSDT", - "COREUSDT", - "COSUSDT", - "COTIUSDT", - "COVALUSDT", - "CROUSDT", - "CRVUSDT", - "CTCUSDT", - "CTKUSDT", - "CTSIUSDT", - "CVCUSDT", - "CVXUSDT", - "CYBERUSDT", - "DAOUSDT", - "DARUSDT", - "DASHUSDT", - "DATAUSDT", - "DEGENUSDT", - "DENTUSDT", - "DEXEUSDT", - "DGBUSDT", - "DODOUSDT", - "DOGEUSDT", - "DOGSUSDT", - "DOGUSDT", - "DOP1USDT", - "DOTUSD", - "DOTUSDT", - "DRIFTUSDT", - "DUSKUSDT", - "DYDXUSDT", - "DYMUSDT", - "EDUUSDT", - "EGLDUSDT", - "ENAUSDT", - "ENJUSDT", - "ENSUSDT", - "EOSUSD", - "EOSUSDT", - "ETCUSDT", - "ETHBTCUSDT", - "ETHFIUSDT", - "ETHUSD", - "ETHUSDT", - "ETHWUSDT", - "FDUSDUSDT", - "FETUSDT", - "FILUSDT", - "FIREUSDT", - "FITFIUSDT", - "FLMUSDT", - "FLOKIUSDT", - "FLOWUSDT", - "FLRUSDT", - "FORTHUSDT", - "FOXYUSDT", - "FRONTUSDT", - "FTMUSDT", - "FTNUSDT", - "FUNUSDT", - "FXSUSDT", - "GALAUSDT", - "GALUSDT", - "GASUSDT", - "GFTUSDT", - "GLMRUSDT", - "GLMUSDT", - "GMEUSDT", - "GMTUSDT", - "GMXUSDT", - "GNOUSDT", - "GODSUSDT", - "GPTUSDT", - "GRTUSDT", - "GTCUSDT", - "GUSDT", - "HBARUSDT", - "HFTUSDT", - "HIFIUSDT", - "HIGHUSDT", - "HNTUSDT", - "HOOKUSDT", - "HOTUSDT", - "ICPUSDT", - "ICXUSDT", - "IDEXUSDT", - "IDUSDT", - "ILVUSDT", - "IMXUSDT", - "INJUSDT", - "IOSTUSDT", - "IOTAUSDT", - "IOTXUSDT", - "IOUSDT", - "IQ50USDT", - "JASMYUSDT", - "JOEUSDT", - "JSTUSDT", - "JTOUSDT", - "JUPUSDT", - "KASUSDT", - "KAVAUSDT", - "KDAUSDT", - 
"KEYUSDT", - "KLAYUSDT", - "KNCUSDT", - "KSMUSDT", - "L3USDT", - "LADYSUSDT", - "LAIUSDT", - "LDOUSDT", - "LEVERUSDT", - "LINAUSDT", - "LINKUSDT", - "LISTAUSDT", - "LITUSDT", - "LOOKSUSDT", - "LOOMUSDT", - "LPTUSDT", - "LQTYUSDT", - "LRCUSDT", - "LSKUSDT", - "LTCUSD", - "LTCUSDT", - "LTOUSDT", - "LUNA2USDT", - "LUNCUSDT", - "MAGICUSDT", - "MANAUSD", - "MANAUSDT", - "MANEKIUSDT", - "MANTAUSDT", - "MAPOUSDT", - "MASAUSDT", - "MASKUSDT", - "MATICUSDT", - "MAVIAUSDT", - "MAVUSDT", - "MAXUSDT", - "MBLUSDT", - "MBOXUSDT", - "MCUSDT", - "MDTUSDT", - "MEMEUSDT", - "MERLUSDT", - "METISUSDT", - "MEWUSDT", - "MINAUSDT", - "MKRUSDT", - "MNTUSDT", - "MOBILEUSDT", - "MOCAUSDT", - "MOGUSDT", - "MONUSDT", - "MOTHERUSDT", - "MOVRUSDT", - "MTLUSDT", - "MULTIUSDT", - "MYRIAUSDT", - "MYROUSDT", - "NEARUSDT", - "NEIROETHUSDT", - "NEOUSDT", - "NFPUSDT", - "NFTUSDT", - "NKNUSDT", - "NMRUSDT", - "NOTUSDT", - "NTRNUSDT", - "NULSUSDT", - "NYANUSDT", - "OCEANUSDT", - "OGNUSDT", - "OGUSDT", - "OMGUSDT", - "OMNIUSDT", - "OMUSDT", - "ONDOUSDT", - "ONEUSDT", - "ONGUSDT", - "ONTUSDT", - "OPUSDT", - "ORBSUSDT", - "ORCAUSDT", - "ORDERUSDT", - "ORDIUSDT", - "ORNUSDT", - "OSMOUSDT", - "OXTUSDT", - "PAXGUSDT", - "PEIPEIUSDT", - "PENDLEUSDT", - "PENGUSDT", - "PEOPLEUSDT", - "PEPEUSDT", - "PERPUSDT", - "PHAUSDT", - "PHBUSDT", - "PIRATEUSDT", - "PIXELUSDT", - "PIXFIUSDT", - "POLYXUSDT", - "PONKEUSDT", - "POPCATUSDT", - "PORTALUSDT", - "POWRUSDT", - "PRCLUSDT", - "PROMUSDT", - "PUNDUUSDT", - "PYTHUSDT", - "QIUSDT", - "QNTUSDT", - "QTUMUSDT", - "RADUSDT", - "RAREUSDT", - "RATSUSDT", - "RAYDIUMUSDT", - "RAYUSDT", - "RDNTUSDT", - "REEFUSDT", - "RENDERUSDT", - "RENUSDT", - "REQUSDT", - "REZUSDT", - "RIFUSDT", - "RLCUSDT", - "RNDRUSDT", - "RONUSDT", - "ROSEUSDT", - "RPLUSDT", - "RSRUSDT", - "RSS3USDT", - "RUNEUSDT", - "RVNUSDT", - "SAFEUSDT", - "SAGAUSDT", - "SANDUSDT", - "SATSUSDT", - "SCAUSDT", - "SCRTUSDT", - "SCUSDT", - "SEIUSDT", - "SFPUSDT", - "SHIB1000USDT", - "SILLYUSDT", - "SKLUSDT", - "SLERFUSDT", - 
"SLPUSDT", - "SNTUSDT", - "SNXUSDT", - "SOLUSD", - "SOLUSDT", - "SPECUSDT", - "SPELLUSDT", - "SSVUSDT", - "STARLUSDT", - "STEEMUSDT", - "STGUSDT", - "STMXUSDT", - "STORJUSDT", - "STPTUSDT", - "STRAXUSDT", - "STRKUSDT", - "STXUSDT", - "SUIUSDT", - "SUNDOGUSDT", - "SUNUSDT", - "SUPERUSDT", - "SUSHIUSDT", - "SWEATUSDT", - "SXPUSDT", - "SYNUSDT", - "SYSUSDT", - "TAIKOUSDT", - "TAOUSDT", - "THETAUSDT", - "TIAUSDT", - "TLMUSDT", - "TNSRUSDT", - "TOKENUSDT", - "TOMIUSDT", - "TOMOUSDT", - "TONUSDT", - "TRBUSDT", - "TRUUSDT", - "TRXUSDT", - "TURBOUSDT", - "TUSDT", - "TWTUSDT", - "UMAUSDT", - "UNFIUSDT", - "UNIUSDT", - "USDCUSDT", - "USDEUSDT", - "USTCUSDT", - "UXLINKUSDT", - "VANRYUSDT", - "VELOUSDT", - "VETUSDT", - "VGXUSDT", - "VIDTUSDT", - "VINUUSDT", - "VOXELUSDT", - "VRAUSDT", - "VTHOUSDT", - "WAVESUSDT", - "WAXPUSDT", - "WENUSDT", - "WIFUSDT", - "WLDUSDT", - "WOOUSDT", - "WSMUSDT", - "WUSDT", - "XAIUSDT", - "XCHUSDT", - "XCNUSDT", - "XECUSDT", - "XEMUSDT", - "XLMUSDT", - "XMRUSDT", - "XNOUSDT", - "XRDUSDT", - "XRPUSD", - "XRPUSDT", - "XTZUSDT", - "XVGUSDT", - "XVSUSDT", - "YFIIUSDT", - "YFIUSDT", - "YGGUSDT", - "ZBCNUSDT", - "ZCXUSDT", - "ZECUSDT", - "ZENUSDT", - "ZETAUSDT", - "ZEUSUSDT", - "ZILUSDT", - "ZKFUSDT", - "ZKJUSDT", - "ZKUSDT", - "ZROUSDT", - "ZRXUSDT" - ], - "initial_capital": 10000, - "commission": 0.001 - } -} diff --git a/config/collections/stocks_traderfox_dax.json b/config/collections/custom/stocks_traderfox_dax.json similarity index 100% rename from config/collections/stocks_traderfox_dax.json rename to config/collections/custom/stocks_traderfox_dax.json diff --git a/config/collections/stocks_traderfox_european.json b/config/collections/custom/stocks_traderfox_european.json similarity index 100% rename from config/collections/stocks_traderfox_european.json rename to config/collections/custom/stocks_traderfox_european.json diff --git a/config/collections/stocks_traderfox_us_financials.json 
b/config/collections/custom/stocks_traderfox_us_financials.json similarity index 100% rename from config/collections/stocks_traderfox_us_financials.json rename to config/collections/custom/stocks_traderfox_us_financials.json diff --git a/config/collections/stocks_traderfox_us_healthcare.json b/config/collections/custom/stocks_traderfox_us_healthcare.json similarity index 100% rename from config/collections/stocks_traderfox_us_healthcare.json rename to config/collections/custom/stocks_traderfox_us_healthcare.json diff --git a/config/collections/stocks_traderfox_us_tech.json b/config/collections/custom/stocks_traderfox_us_tech.json similarity index 100% rename from config/collections/stocks_traderfox_us_tech.json rename to config/collections/custom/stocks_traderfox_us_tech.json diff --git a/config/collections/default/bonds_core.json b/config/collections/default/bonds_core.json new file mode 100644 index 0000000..2363ef5 --- /dev/null +++ b/config/collections/default/bonds_core.json @@ -0,0 +1,13 @@ +{ + "bonds_core": { + "name": "Bonds Core (Liquid ETFs)", + "asset_type": "bond", + "symbols": [ + "AGG", "BND", "TLT", "IEF", "SHY", + "LQD", "HYG", "JNK", "EMB", "TIP", + "IEI", "VGIT", "GOVT", "MUB", "VCSH" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/commodities_core.json b/config/collections/default/commodities_core.json new file mode 100644 index 0000000..5abd72b --- /dev/null +++ b/config/collections/default/commodities_core.json @@ -0,0 +1,13 @@ +{ + "commodities_core": { + "name": "Commodities Core (Liquid ETFs)", + "asset_type": "commodities", + "symbols": [ + "GLD", "IAU", "SLV", "PPLT", "PALL", + "DBC", "DBA", "USO", "UNG", "UGA", + "CORN", "SOYB", "WEAT", "COMT" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/crypto_liquid.json b/config/collections/default/crypto_liquid.json new file mode 100644 index 0000000..cd156bb --- /dev/null +++ 
b/config/collections/default/crypto_liquid.json @@ -0,0 +1,13 @@ +{ + "crypto_liquid": { + "name": "Crypto Liquid (Top Market Cap)", + "asset_type": "crypto", + "symbols": [ + "BTCUSDT", "ETHUSDT", "BNBUSDT", "SOLUSDT", "XRPUSDT", + "ADAUSDT", "DOGEUSDT", "LTCUSDT", "DOTUSDT", "AVAXUSDT", + "LINKUSDT" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/forex_majors.json b/config/collections/default/forex_majors.json new file mode 100644 index 0000000..44172ec --- /dev/null +++ b/config/collections/default/forex_majors.json @@ -0,0 +1,12 @@ +{ + "forex_majors": { + "name": "Forex Majors", + "asset_type": "forex", + "symbols": [ + "EURUSD=X", "GBPUSD=X", "USDJPY=X", "USDCHF=X", "AUDUSD=X", + "USDCAD=X", "NZDUSD=X", "EURJPY=X", "GBPJPY=X", "EURGBP=X" + ], + "initial_capital": 10000, + "commission": 0.0002 + } +} diff --git a/config/collections/default/indices_global_core.json b/config/collections/default/indices_global_core.json new file mode 100644 index 0000000..d49af86 --- /dev/null +++ b/config/collections/default/indices_global_core.json @@ -0,0 +1,13 @@ +{ + "indices_global_core": { + "name": "Global Indices (ETFs)", + "asset_type": "indices", + "symbols": [ + "SPY", "QQQ", "DIA", "IWM", + "EFA", "EEM", "VGK", "EWJ", "FXI", + "VTI", "ACWI" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/stocks_global_factor_core.json b/config/collections/default/stocks_global_factor_core.json new file mode 100644 index 0000000..e259cd8 --- /dev/null +++ b/config/collections/default/stocks_global_factor_core.json @@ -0,0 +1,20 @@ +{ + "stocks_global_factor_core": { + "name": "Global Factor Core (ACWI + Factors)", + "asset_type": "stocks", + "symbols": [ + "ACWI", + "ACWV", + "IQLT", + "QUAL", + "VLUE", + "IVLU", + "MTUM", + "IMTM", + "SIZE", + "ISZE" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/stocks_us_growth_core.json 
b/config/collections/default/stocks_us_growth_core.json new file mode 100644 index 0000000..a06d602 --- /dev/null +++ b/config/collections/default/stocks_us_growth_core.json @@ -0,0 +1,12 @@ +{ + "stocks_us_growth_core": { + "name": "US Growth Tech Core", + "asset_type": "stocks", + "symbols": [ + "AAPL", "MSFT", "NVDA", "AVGO", "META", + "GOOGL", "AMD", "CRM", "ORCL", "NOW" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/stocks_us_mega_core.json b/config/collections/default/stocks_us_mega_core.json new file mode 100644 index 0000000..11acabc --- /dev/null +++ b/config/collections/default/stocks_us_mega_core.json @@ -0,0 +1,12 @@ +{ + "stocks_us_mega_core": { + "name": "US Mega-Cap Core", + "asset_type": "stocks", + "symbols": [ + "AAPL", "MSFT", "GOOGL", "AMZN", "META", + "NVDA", "TSLA", "BRK-B", "JPM", "JNJ" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/stocks_us_minvol_core.json b/config/collections/default/stocks_us_minvol_core.json new file mode 100644 index 0000000..a18df74 --- /dev/null +++ b/config/collections/default/stocks_us_minvol_core.json @@ -0,0 +1,20 @@ +{ + "stocks_us_minvol_core": { + "name": "US Min-Vol Core (ETFs)", + "asset_type": "stocks", + "symbols": [ + "USMV", + "SPLV", + "XMLV", + "SMLV", + "LVHD", + "VFMV", + "EFAV", + "ACWV", + "LOWV", + "LGLV" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/default/stocks_us_quality_core.json b/config/collections/default/stocks_us_quality_core.json new file mode 100644 index 0000000..2c0bc70 --- /dev/null +++ b/config/collections/default/stocks_us_quality_core.json @@ -0,0 +1,20 @@ +{ + "stocks_us_quality_core": { + "name": "US Quality Core (ETFs)", + "asset_type": "stocks", + "symbols": [ + "QUAL", + "SPHQ", + "VFQY", + "QDF", + "DGRW", + "JQUA", + "OUSA", + "FGQG", + "QGRW", + "QUS" + ], + "initial_capital": 10000, + "commission": 0.001 + } +}
diff --git a/config/collections/default/stocks_us_value_core.json b/config/collections/default/stocks_us_value_core.json new file mode 100644 index 0000000..2a45e2d --- /dev/null +++ b/config/collections/default/stocks_us_value_core.json @@ -0,0 +1,20 @@ +{ + "stocks_us_value_core": { + "name": "US Value Core (ETFs)", + "asset_type": "stocks", + "symbols": [ + "VTV", + "IWD", + "SCHV", + "RPV", + "SPYV", + "IVE", + "VONV", + "VLUE", + "VYM", + "DVY" + ], + "initial_capital": 10000, + "commission": 0.001 + } +} diff --git a/config/collections/forex.json b/config/collections/forex.json deleted file mode 100644 index 729689c..0000000 --- a/config/collections/forex.json +++ /dev/null @@ -1,87 +0,0 @@ -{ - "forex": { - "name": "Forex Portfolio", - "description": "Comprehensive forex portfolio covering major, minor, and exotic currency pairs including G7, European, Scandinavian, Eastern European, emerging market, and Asian currencies", - "asset_type": "forex", - "symbols": [ - "EURUSD=X", - "GBPUSD=X", - "USDJPY=X", - "USDCHF=X", - "AUDUSD=X", - "USDCAD=X", - "NZDUSD=X", - "EURJPY=X", - "GBPJPY=X", - "EURGBP=X", - "AUDJPY=X", - "EURAUD=X", - "EURCHF=X", - "AUDNZD=X", - "GBPAUD=X", - "GBPCAD=X", - "CHFJPY=X", - "CADJPY=X", - "NZDJPY=X", - "AUDCAD=X", - "AUDCHF=X", - "CADCHF=X", - "EURNZD=X", - "GBPCHF=X", - "GBPNZD=X", - "NZDCAD=X", - "NZDCHF=X", - "EURPLN=X", - "USDSEK=X", - "USDNOK=X", - "USDDKK=X", - "EURCZK=X", - "EURHUF=X", - "EURRON=X", - "GBPPLN=X", - "USDPLN=X", - "USDCZK=X", - "USDHUF=X", - "USDRON=X", - "USDRUB=X", - "EURRUB=X", - "USDTRY=X", - "EURTRY=X", - "USDZAR=X", - "EURZAR=X", - "GBPZAR=X", - "USDBRL=X", - "EURBRL=X", - "USDMXN=X", - "EURMXN=X", - "USDCNY=X", - "EURCNY=X", - "GBPCNY=X", - "AUDCNY=X", - "USDHKD=X", - "EURSGD=X", - "USDSGD=X", - "GBPSGD=X", - "AUDSGD=X", - "NZDSGD=X", - "USDKRW=X", - "EURKRW=X", - "USDINR=X", - "EURINR=X", - "GBPINR=X", - "JPYINR=X", - "USDTHB=X", - "EURTHB=X", - "USDPHP=X", - "EURPHP=X", - "USDIDR=X", - "EURIDR=X", - 
"USDVND=X", - "EURVND=X", - "USDMYR=X", - "EURMYR=X" - ], - "initial_capital": 10000, - "commission": 0.001 - } -} diff --git a/config/collections/indices.json b/config/collections/indices.json deleted file mode 100644 index 72e26ed..0000000 --- a/config/collections/indices.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "indices": { - "name": "Indices Collection", - "asset_type": "stock", - "symbols": [ - "SPY", - "VTI", - "QQQ", - "IWM", - "EFA", - "VEA", - "EEM", - "VWO", - "DIA", - "MDY", - "IJH", - "IJR", - "VXF", - "VXUS", - "ACWI", - "ITOT", - "IXUS", - "IEFA", - "IEMG", - "SCHB", - "SCHA", - "SCHM", - "SCHF", - "SCHY", - "SCHE", - "SCHC" - ], - "initial_capital": 10000, - "commission": 0.001 - } -} diff --git a/config/optimization_config.json b/config/optimization_config.json deleted file mode 100644 index 7458c8f..0000000 --- a/config/optimization_config.json +++ /dev/null @@ -1,193 +0,0 @@ -{ - "data_sources": { - "yahoo_finance": { - "enabled": true, - "priority": 1, - "rate_limit": 1.5, - "max_retries": 3, - "supports_batch": true, - "max_symbols_per_request": 100 - }, - "alpha_vantage": { - "enabled": false, - "priority": 2, - "rate_limit": 12, - "max_retries": 3, - "api_key_env": "ALPHA_VANTAGE_API_KEY", - "supports_batch": false, - "max_symbols_per_request": 1, - "daily_limit": 500 - }, - "twelve_data": { - "enabled": false, - "priority": 3, - "rate_limit": 1.0, - "max_retries": 3, - "api_key_env": "TWELVE_DATA_API_KEY", - "supports_batch": true, - "max_symbols_per_request": 8, - "daily_limit": 800 - } - }, - "caching": { - "max_size_gb": 10.0, - "data_ttl_hours": 48, - "backtest_ttl_days": 30, - "optimization_ttl_days": 60, - "compression_enabled": true, - "cleanup_on_startup": true - }, - "backtesting": { - "default_initial_capital": 10000, - "default_commission": 0.001, - "max_workers": "auto", - "memory_limit_gb": 8.0, - "batch_size": "auto", - "save_trades_by_default": false, - "save_equity_curves_by_default": false - }, - "optimization": { - 
"methods": { - "genetic_algorithm": { - "default_population_size": 50, - "default_max_iterations": 100, - "mutation_rate": 0.1, - "crossover_rate": 0.7, - "early_stopping_patience": 20, - "elite_percentage": 0.1, - "tournament_size": 3 - }, - "grid_search": { - "max_combinations": 10000, - "parallel_evaluation": true - }, - "bayesian": { - "n_initial_points": 10, - "acquisition_function": "expected_improvement", - "kernel": "matern", - "normalize_y": true - } - }, - "default_metric": "sharpe_ratio", - "constraint_functions": [] - }, - "strategy_parameters": { - "rsi": { - "period": [10, 14, 20, 30], - "overbought": [70, 75, 80], - "oversold": [20, 25, 30] - }, - "macd": { - "fast": [8, 12, 16], - "slow": [21, 26, 30], - "signal": [6, 9, 12] - }, - "bollinger_bands": { - "period": [15, 20, 25], - "deviation": [1.5, 2.0, 2.5] - }, - "moving_average_crossover": { - "fast_period": [5, 10, 15, 20], - "slow_period": [20, 30, 50, 100] - }, - "adx": { - "period": [10, 14, 20], - "threshold": [20, 25, 30] - }, - "mfi": { - "period": [10, 14, 20], - "overbought": [80, 85], - "oversold": [15, 20] - }, - "turtle_trading": { - "entry_period": [10, 20, 30], - "exit_period": [5, 10, 15], - "atr_period": [14, 20] - }, - "linear_regression": { - "period": [14, 20, 30], - "threshold": [0.5, 1.0, 1.5] - }, - "pullback_trading": { - "trend_period": [20, 50, 100], - "pullback_threshold": [0.02, 0.05, 0.1] - }, - "mean_reversion": { - "period": [10, 20, 30], - "deviation_threshold": [1.5, 2.0, 2.5] - } - }, - "asset_universes": { - "sp500_large_cap": { - "description": "S&P 500 Large Cap stocks", - "symbols": ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA", "META", "NVDA", "BRK-B", "JNJ", "V", "WMT", "JPM", "MA", "PG", "UNH", "DIS", "HD", "PYPL", "BAC", "NFLX", "ADBE", "CRM", "CMCSA", "XOM", "KO", "VZ", "ABT", "ABBV", "PFE", "TMO"], - "max_symbols": 100 - }, - "nasdaq_tech": { - "description": "NASDAQ Technology stocks", - "symbols": ["AAPL", "MSFT", "GOOGL", "AMZN", "TSLA", "META", "NVDA", 
"NFLX", "ADBE", "CRM", "INTC", "CSCO", "ORCL", "QCOM", "AMD", "AVGO", "TXN", "INTU", "ISRG", "AMGN"], - "max_symbols": 50 - }, - "forex_majors": { - "description": "Major forex pairs", - "symbols": ["EURUSD=X", "GBPUSD=X", "USDJPY=X", "AUDUSD=X", "USDCAD=X", "USDCHF=X", "NZDUSD=X"], - "max_symbols": 10 - }, - "crypto_major": { - "description": "Major cryptocurrencies", - "symbols": ["BTC-USD", "ETH-USD", "BNB-USD", "ADA-USD", "XRP-USD", "SOL-USD", "DOT-USD", "DOGE-USD", "AVAX-USD", "SHIB-USD"], - "max_symbols": 20 - }, - "commodities": { - "description": "Major commodities", - "symbols": ["GC=F", "SI=F", "CL=F", "NG=F", "ZC=F", "ZS=F", "ZW=F", "KC=F", "CC=F", "SB=F"], - "max_symbols": 15 - }, - "sector_etfs": { - "description": "Sector ETFs", - "symbols": ["XLK", "XLF", "XLV", "XLE", "XLI", "XLY", "XLP", "XLB", "XLU", "XLRE", "XLC"], - "max_symbols": 15 - } - }, - "reporting": { - "default_output_dir": "exports/reports", - "cache_reports": true, - "default_format": "html", - "include_charts_by_default": true, - "chart_theme": "plotly_white", - "export_formats": ["html", "json", "pdf"], - "auto_open_reports": false - }, - "risk_management": { - "max_drawdown_threshold": -20.0, - "min_sharpe_ratio": 0.5, - "max_leverage": 2.0, - "position_size_limits": { - "min_percentage": 0.01, - "max_percentage": 0.1 - }, - "correlation_threshold": 0.8 - }, - "performance": { - "parallel_processing": true, - "max_concurrent_downloads": 10, - "memory_monitoring": true, - "gc_frequency": 100, - "progress_reporting": true, - "log_level": "INFO", - "profiling_enabled": false - }, - "intervals": { - "supported": ["1m", "2m", "5m", "15m", "30m", "60m", "90m", "1h", "1d", "5d", "1wk", "1mo", "3mo"], - "default": "1d", - "intraday_limit_days": 60, - "daily_limit_years": 20 - }, - "validation": { - "min_data_points": 100, - "max_missing_data_percentage": 5.0, - "validate_ohlc_consistency": true, - "remove_outliers": true, - "outlier_threshold": 5.0 - } -} diff --git a/docker-compose.yml 
b/docker-compose.yml index 530cd38..03253d0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -41,6 +41,7 @@ services: - CACHE_DIR=${CACHE_DIR:-/app/cache} - LOG_LEVEL=${LOG_LEVEL:-INFO} - DATABASE_URL=${DATABASE_URL:-postgresql://quantuser:quantpass@postgres:5432/quant_system} + - TAILWIND_CSS_HREF=${TAILWIND_CSS_HREF:-exports/reports/assets/tailwind.min.css} - PYTHONPATH=/app - ALPHA_VANTAGE_API_KEY=${ALPHA_VANTAGE_API_KEY} - TWELVE_DATA_API_KEY=${TWELVE_DATA_API_KEY} @@ -54,18 +55,34 @@ services: - OPENAI_MODEL=${OPENAI_MODEL:-gpt-4o} - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} - ANTHROPIC_MODEL=${ANTHROPIC_MODEL:-claude-3-5-sonnet-20241022} + - USE_REDIS_RECENT=${USE_REDIS_RECENT:-false} + - REDIS_URL=${REDIS_URL:-redis://redis:6379/0} volumes: - ./cache:/app/cache - ./exports:/app/exports - ./logs:/app/logs - ./config:/app/config:ro + - ./src:/app/src:ro + - ./scripts:/app/scripts:ro + - ./tests:/app/tests:ro - ./quant-strategies/algorithms/python:/app/external_strategies:ro + - ./artifacts:/app/artifacts depends_on: - postgres stdin_open: true tty: true command: ["bash"] + # Optional Redis for recent overlay cache (enable via profile) + redis: + image: redis:7-alpine + container_name: quant-redis + ports: + - "6379:6379" + command: ["redis-server", "--save", "", "--appendonly", "no"] + restart: unless-stopped + profiles: ["redis"] + networks: default: driver: bridge diff --git a/docs/cli-guide.md b/docs/cli-guide.md index dfab014..7833df8 100644 --- a/docs/cli-guide.md +++ b/docs/cli-guide.md @@ -1,8 +1,39 @@ # CLI Reference -Complete command-line interface reference for the Quant Trading System. +This guide documents the CLI. It includes a short section for the current unified CLI and a preserved legacy section for older multi-subcommand commands. -## Quick Start +Note: The current entrypoint focuses on the `collection` subcommand. Use the README for up-to-date commands. Legacy examples are kept below for context. 
+ +## Current (Unified) CLI + +```bash +# Show help (inside Docker) +docker compose run --rm quant python -m src.cli.unified_cli --help + +# Run bonds collection (1d/max, all strategies) +docker compose run --rm -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --action direct --interval 1d --period max --strategies all --exports all + +# Dry run (plan only) + exports (csv, report, tradingview, ai or all) +docker compose run --rm -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --interval 1d --period max --strategies all --dry-run --exports all +``` + +Exports and naming (placeholders shown in angle brackets): +- CSV → `exports/csv/<asset_type>/<quarter>/<Name>_Collection_<interval>_<timestamp>.csv` +- Reports → `exports/reports/<asset_type>/<quarter>/<Name>_Collection_<interval>_<timestamp>.html` +- TV alerts → `exports/tv_alerts/<asset_type>/<quarter>/<Name>_Collection_<interval>_<timestamp>.md` +- AI recos (md/html) → `exports/ai_reco/<asset_type>/<quarter>/<Name>_Collection_<interval>_<timestamp>.*` + +When multiple intervals are used, filenames prefer `1d`. Use `--interval 1d` to constrain content and filenames. + +## Legacy CLI (Preserved) + +These examples refer to a previous iteration of the CLI that exposed categories like `portfolio`, `data`, `cache`, and `reports`. Prefer the section above for current usage.
+ +### Quick Start (legacy) ```bash # Activate environment @@ -15,20 +46,20 @@ python -m src.cli.unified_cli portfolio list python -m src.cli.unified_cli portfolio test crypto --open-browser ``` -## Command Structure +### Command Structure (legacy) ``` python -m src.cli.unified_cli [options] ``` -## Portfolio Commands +### Portfolio Commands (legacy) -### List Portfolios +#### List Portfolios ```bash python -m src.cli.unified_cli portfolio list ``` -### Test Portfolio +#### Test Portfolio ```bash python -m src.cli.unified_cli portfolio test [options] @@ -39,7 +70,7 @@ Options: --open-browser Auto-open results in browser ``` -### Test All Strategies and Timeframes +#### Test All Strategies and Timeframes ```bash python -m src.cli.unified_cli portfolio test-all --symbols SYMBOL1,SYMBOL2 [options] @@ -50,9 +81,9 @@ Options: --strategies LIST Comma-separated strategies to test ``` -## Data Commands +### Data Commands (legacy) -### Download Data +#### Download Data ```bash python -m src.cli.unified_cli data download --symbols AAPL,GOOGL [options] @@ -63,21 +94,21 @@ Options: --source SOURCE Data source (yahoo, alpha_vantage, etc.) 
``` -## Cache Commands +### Cache Commands (legacy) -### Cache Statistics +#### Cache Statistics ```bash python -m src.cli.unified_cli cache stats ``` -### Clear Cache +#### Clear Cache ```bash python -m src.cli.unified_cli cache clear [--all] [--symbol SYMBOL] ``` -## Report Commands +### Report Commands (legacy) -### Generate Reports +#### Generate Reports ```bash python -m src.cli.unified_cli reports generate [options] @@ -87,14 +118,14 @@ Options: --output-dir DIR Output directory ``` -### Organize Reports +#### Organize Reports ```bash python -m src.cli.unified_cli reports organize ``` -## Examples +### Examples (legacy) -### Test Crypto Portfolio +#### Test Crypto Portfolio ```bash # Using Sortino ratio (default - superior to Sharpe) python -m src.cli.unified_cli portfolio test crypto \ @@ -109,7 +140,7 @@ python -m src.cli.unified_cli portfolio test crypto \ --period 1y ``` -### Download Forex Data +#### Download Forex Data ```bash python -m src.cli.unified_cli data download \ --symbols EURUSD=X,GBPUSD=X \ @@ -117,7 +148,7 @@ python -m src.cli.unified_cli data download \ --source twelve_data ``` -### Daily Workflow +#### Daily Workflow ```bash # Check cache status python -m src.cli.unified_cli cache stats @@ -129,7 +160,7 @@ python -m src.cli.unified_cli portfolio test-all --metric sortino_ratio --period python -m src.cli.unified_cli reports organize ``` -## Configuration +## Configuration (legacy) Set environment variables in `.env`: ```bash @@ -139,7 +170,7 @@ DEFAULT_PERIOD=1y BROWSER_AUTO_OPEN=true ``` -## Help +## Help (legacy) Get help for any command: ```bash diff --git a/docs/development.md b/docs/development.md index 57d906c..3ed9d6c 100644 --- a/docs/development.md +++ b/docs/development.md @@ -38,6 +38,21 @@ pytest tests/test_data_manager.py pytest -n auto ``` +### Database for Tests +- By default, unit tests and CI use a lightweight SQLite database to avoid any external Postgres dependency. 
+- The unified DB models auto-detect CI/pytest and prefer SQLite when any of these env vars are present: `CI`, `PYTEST_CURRENT_TEST`, or `TESTING`. +- You can force this behavior explicitly by setting: + - `UNIFIED_MODELS_SQLITE=1` + - Optionally, also set `DATABASE_URL=sqlite:///quant_unified_test.db` for consistency. + +Examples: +```bash +# Local: force SQLite for tests +export UNIFIED_MODELS_SQLITE=1 +export DATABASE_URL=sqlite:///quant_unified_test.db +pytest +``` + ### Test Structure - `tests/test_*.py` - Unit tests - `tests/test_integration.py` - Integration tests @@ -87,7 +102,7 @@ tests/ └── conftest.py # Test configuration config/ -└── portfolios/ # Portfolio configurations +└── collections/ # Asset collections ``` ## 🔧 Development Commands @@ -100,11 +115,13 @@ docker build . # Build Docker image ### Running Services ```bash -# CLI commands -python -m src.cli.unified_cli portfolio list +# CLI discovery +python -m src.cli.unified_cli --help -# Docker development -docker-compose up --build +# Docker development (compose v2) +docker compose up -d postgres pgadmin +docker compose build quant +docker compose run --rm quant python -m src.cli.unified_cli --help ``` ## 📝 Contributing diff --git a/docs/docker.md b/docs/docker.md index 5b943ea..85b2db4 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -1,350 +1,95 @@ # Docker Guide -Guide for running the Quant Trading System with Docker. +Guide for running this repository with Docker Compose. This reflects the current compose file and unified CLI. 
-## Quick Start - -### Run the System -```bash -# Clone and setup -git clone https://github.com/LouisLetcher/quant-system.git -cd quant-system +## Services -# Copy environment file -cp .env.example .env -# Edit .env with your API keys - -# Run the system -docker-compose up quant-system -``` +From `docker-compose.yml`: -### Access Services -- **Web Interface**: http://localhost:8000 -- **API Documentation**: http://localhost:8000/docs -- **Jupyter Lab**: http://localhost:8888 (password: `quant`) +- `postgres` — PostgreSQL 15 (persisted via `postgres-data` volume, exposed on host `5433`). +- `pgadmin` — pgAdmin UI (exposed on host `5050`). +- `quant` — Application container (mounts source, strategies, cache, exports, logs, config, artifacts). -## Docker Compose Services - -### Core Services -```bash -# Main application -docker-compose up quant-system - -# Development environment -docker-compose up dev - -# Testing environment -docker-compose up test -``` +## Quick Start -### Extended Services ```bash -# With database -docker-compose --profile database up +# 1) Copy env and edit keys +cp .env.example .env -# With API server -docker-compose --profile api up +# 2) Start DB + pgAdmin (may pull images on first run) +docker compose up -d postgres pgadmin -# With monitoring -docker-compose --profile monitoring up +# 3) Build the app image +docker compose build quant -# Full stack -docker-compose --profile database --profile api --profile monitoring up +# 4) Show CLI help +docker compose run --rm quant python -m src.cli.unified_cli --help ``` -## Available Profiles - -### `dev` - Development -- Hot-reload enabled -- Debug logging -- Development dependencies -- Volume mounts for code - -### `test` - Testing -- Test environment -- Isolated test database -- Coverage reporting - -### `api` - Web API -- FastAPI server -- OpenAPI documentation -- REST endpoints - -### `database` - PostgreSQL -- Persistent data storage -- Automated backups -- Connection pooling +## Preferred Run: 
Bonds, 1d, Max, All Strategies -### `cache` - Redis -- Data caching -- Session storage -- Performance optimization - -### `monitoring` - Observability -- Prometheus metrics -- Grafana dashboards -- Health checks - -### `jupyter` - Analysis -- Jupyter Lab -- Data science tools -- Interactive analysis - -## Environment Configuration - -### Required Variables ```bash -# API Keys -ALPHA_VANTAGE_API_KEY=your_key -TWELVE_DATA_API_KEY=your_key -POLYGON_API_KEY=your_key - -# System Settings -LOG_LEVEL=INFO -CACHE_ENABLED=true +docker compose run --rm \ + -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --action direct \ + --interval 1d \ + --period max \ + --strategies all \ + --exports all \ + --log-level INFO ``` -### Database Configuration -```bash -# PostgreSQL -DATABASE_URL=postgresql://user:pass@postgres:5432/quant_db -POSTGRES_USER=quant_user -POSTGRES_PASSWORD=secure_password -POSTGRES_DB=quant_db -``` +## Dry Run (Plan Only) + Exports -### Monitoring Configuration ```bash -# Prometheus -PROMETHEUS_PORT=9090 - -# Grafana -GRAFANA_PORT=3000 -GRAFANA_ADMIN_PASSWORD=admin -``` - -## Volume Mounts - -The Docker setup includes several volume mounts: +docker compose run --rm \ + -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --interval 1d --period max --strategies all \ + --dry-run --exports all --log-level DEBUG -```yaml -volumes: - - ./config:/app/config # Configuration files - - ./cache:/app/cache # Data cache - - ./exports:/app/exports # Organized exports (reports/alerts by quarter) - - postgres_data:/var/lib/postgresql/data # Database - - redis_data:/data # Cache storage +Exports written under `exports/`: +- CSV → `exports/csv///...` +- Reports → `exports/reports///...` +- TV alerts → `exports/tv_alerts///...` +- AI recos (md/html/csv) → `exports/ai_reco///...` ``` -## Production Deployment +## Interactive Shell -### Build Production Image ```bash -# 
Build optimized image -docker build -t quant-system:prod . - -# Or use multi-stage build -docker build --target production -t quant-system:prod . +docker compose run --rm quant bash ``` -### Deploy with Docker Swarm -```bash -# Initialize swarm -docker swarm init +## Ports, Mounts, and Volumes -# Deploy stack -docker stack deploy -c docker-compose.yml quant-stack -``` +- Ports: Postgres `5433→5432`, pgAdmin `5050→80` +- Mounts (repo → container): + - `./cache` → `/app/cache` + - `./exports` → `/app/exports` + - `./logs` → `/app/logs` + - `./config` → `/app/config:ro` + - `./src` → `/app/src:ro` + - `./artifacts` → `/app/artifacts` + - `./quant-strategies/algorithms/python` → `/app/external_strategies:ro` +- Volume: `postgres-data` → `/var/lib/postgresql/data` -### Deploy with Kubernetes -```bash -# Convert docker-compose to k8s -kompose convert +## Environment -# Apply manifests -kubectl apply -f . -``` +- In-container DB: `DATABASE_URL=postgresql://quantuser:quantpass@postgres:5432/quant_system` +- Optional API keys: `ALPHA_VANTAGE_API_KEY`, `TWELVE_DATA_API_KEY`, `POLYGON_API_KEY`, `TIINGO_API_KEY`, `FINNHUB_API_KEY`, `BYBIT_API_KEY`, `BYBIT_API_SECRET`, `BYBIT_TESTNET` +- Optional LLMs: `OPENAI_API_KEY`, `OPENAI_MODEL`, `ANTHROPIC_API_KEY`, `ANTHROPIC_MODEL` -## Health Checks +## pgAdmin -Health checks are configured for all services: - -```bash -# Check service health -docker-compose ps - -# View logs -docker-compose logs quant-system - -# Execute health check manually -docker-compose exec quant-system python -c " -import requests -response = requests.get('http://localhost:8000/health') -print(response.status_code, response.json()) -" -``` +- Open `http://localhost:5050` +- Credentials from `.env` (`PGADMIN_DEFAULT_EMAIL`, `PGADMIN_DEFAULT_PASSWORD`) +- Register server: host `postgres`, port `5432`, DB `quant_system`, user `quantuser` ## Troubleshooting -### Common Issues - -1. 
**Port Conflicts** - ```bash - # Check port usage - lsof -i :8000 - - # Use different ports - export API_PORT=8001 - docker-compose up - ``` - -2. **Permission Issues** - ```bash - # Fix file permissions - sudo chown -R $USER:$USER ./cache ./exports - - # Or use Docker user - export UID=$(id -u) - export GID=$(id -g) - docker-compose up - ``` - -3. **Memory Issues** - ```bash - # Increase Docker memory limit - # Docker Desktop: Settings > Resources > Memory - - # Or limit container memory - docker-compose up --memory=2g - ``` - -4. **Database Connection** - ```bash - # Check database status - docker-compose logs postgres - - # Connect to database - docker-compose exec postgres psql -U quant_user -d quant_db - ``` - -### Debug Commands -```bash -# Enter container shell -docker-compose exec quant-system bash - -# View container logs -docker-compose logs -f quant-system - -# Check resource usage -docker stats - -# Inspect container -docker-compose exec quant-system python --version -docker-compose exec quant-system pip list -``` - -## Performance Optimization - -### Resource Limits -```yaml -services: - quant-system: - deploy: - resources: - limits: - memory: 2G - cpus: '1.0' - reservations: - memory: 1G - cpus: '0.5' -``` - -### Caching Strategy -- Use Redis for session/API caching -- Mount cache directory as volume -- Implement cache warming - -### Database Optimization -- Use connection pooling -- Optimize queries -- Regular maintenance - -## Monitoring and Logging - -### Prometheus Metrics -Access metrics at http://localhost:9090 - -Key metrics: -- API request duration -- Cache hit rate -- Database connections -- Memory usage - -### Grafana Dashboards -Access dashboards at http://localhost:3000 - -Default dashboards: -- System Overview -- API Performance -- Database Metrics -- Cache Performance - -### Log Aggregation -```bash -# View all logs -docker-compose logs - -# Follow specific service -docker-compose logs -f quant-system - -# Export logs -docker-compose 
logs > system.log -``` - -## Backup and Recovery - -### Database Backup -```bash -# Create backup -docker-compose exec postgres pg_dump -U quant_user quant_db > backup.sql - -# Restore backup -docker-compose exec -T postgres psql -U quant_user quant_db < backup.sql -``` - -### Data Backup -```bash -# Backup cache and reports -tar -czf backup.tar.gz cache/ exports/ config/ - -# Restore data -tar -xzf backup.tar.gz -``` - -## Security - -### Network Security -- Use internal networks -- Expose only necessary ports -- Implement SSL/TLS - -### Secrets Management -```bash -# Use Docker secrets -echo "api_key_value" | docker secret create alpha_vantage_key - - -# Or use external secret management -# - HashiCorp Vault -# - AWS Secrets Manager -# - Azure Key Vault -``` - -### Image Security -```bash -# Scan images for vulnerabilities -docker scan quant-system:latest - -# Use distroless base images -# Use multi-stage builds -# Regular security updates -``` +- Use singular subcommand `collection` (not `collections`). +- Ensure strategies are mounted and set `STRATEGIES_PATH=/app/external_strategies` when running. +- For timeouts/long runs, start with `--dry-run`, then narrow strategies/symbols or set `--max-workers` appropriately. +- See `docs/pgadmin-and-performance.md` for SQL queries, performance tuning, and psql connection strings. diff --git a/docs/features.md b/docs/features.md index b18fd8b..31c0566 100644 --- a/docs/features.md +++ b/docs/features.md @@ -1,5 +1,7 @@ # Comprehensive Features Overview +Note: Command examples in this document may use legacy CLI patterns (e.g., `portfolio` subcommands). For current usage, prefer the README and `collection` subcommand examples. + This document provides a complete overview of implemented and planned features in the Quant Trading System. 
## ✅ Core Features (Implemented) @@ -17,15 +19,17 @@ This document provides a complete overview of implemented and planned features i - ✅ Cache management for faster repeated analysis - ✅ Support for crypto, forex, and traditional assets -**Usage**: +**Usage (current CLI)**: ```bash -# Single backtest -docker-compose run --rm quant python -m src.cli.unified_cli portfolio backtest \ - --symbols BTCUSDT ETHUSDT --strategy BuyAndHold --start-date 2023-01-01 - -# Test all portfolios -docker-compose run --rm quant python -m src.cli.unified_cli portfolio test-all \ - --portfolio config/portfolios/crypto.json --metric sortino_ratio +# Preferred run: Bonds collection, 1d interval, max period, all strategies +docker compose run --rm -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --action direct --interval 1d --period max --strategies all --exports all --log-level INFO + +# Dry run (plan only) + exports from DB +docker compose run --rm -e STRATEGIES_PATH=/app/external_strategies \ + quant python -m src.cli.unified_cli collection bonds \ + --interval 1d --period max --strategies all --dry-run --exports all --log-level DEBUG ``` ### 2. Portfolio Management & Configuration @@ -77,22 +81,21 @@ docker-compose run --rm quant python -m src.cli.unified_cli portfolio test-all \ ### 6. TradingView Alert Export **Status**: ✅ **IMPLEMENTED** -**Description**: Export trading alerts from quarterly portfolio reports with TradingView placeholders. +**Description**: Export trading alerts directly from the database (best strategies), with TradingView placeholders. 
**Features**: -- ✅ Auto-organized quarterly export structure (`exports/tradingview_alerts/YYYY/QX/`) -- ✅ Strategy and timeframe extraction from HTML reports +- ✅ Auto-organized quarterly export structure (`exports/tv_alerts/YYYY/QX/`) +- ✅ DB-backed (no HTML scraping) - ✅ TradingView placeholders (`{{close}}`, `{{timenow}}`, `{{strategy.order.action}}`) - ✅ Performance metrics integration (Sharpe, profit, win rate) +- ✅ Collection/portfolio filtering (`--collection commodities`, `--collection bonds`) - ✅ Symbol-specific filtering and export options -**Usage**: +**Usage (current CLI)**: ```bash -# Auto-organized by quarter/year -poetry run python src/utils/tradingview_alert_exporter.py --output "alerts.md" - -# Export for specific symbol -poetry run python src/utils/tradingview_alert_exporter.py --symbol BTCUSDT +# Generate TradingView alerts from DB (no backtests) +docker compose run --rm \ + quant python -m src.cli.unified_cli collection bonds --dry-run --exports tradingview ``` ### 7. Docker Infrastructure @@ -121,49 +124,35 @@ poetry run python src/utils/tradingview_alert_exporter.py --symbol BTCUSDT ### 9. CSV Export **Status**: ✅ **IMPLEMENTED** -**Description**: Export portfolio data with best strategies and timeframes from existing quarterly reports. +**Description**: Export portfolio data with best strategies and timeframes directly from the database. **Features**: - ✅ CSV export with symbol, best strategy, best timeframe, and performance metrics -- ✅ Bulk export for all assets from quarterly reports +- ✅ Bulk export for all assets from the database - ✅ **Separate CSV files for each portfolio** (Crypto, Bonds, Forex, Stocks, etc.) 
- ✅ Customizable column selection (Sharpe, Sortino, profit, drawdown) - ✅ Integration with existing quarterly report structure -- ✅ Organized quarterly directory structure (`exports/data_exports/YYYY/QX/`) -- ✅ HTML report parsing without re-running backtests -- ✅ Maintains same file naming as HTML reports +- ✅ Organized quarterly directory structure (`exports/csv/YYYY/QX/`) +- ✅ Unified naming with HTML/TV/AI exports -**Usage**: +**Usage (current CLI)**: ```bash -# Export best strategies from quarterly reports -docker-compose run --rm quant python -m src.cli.unified_cli reports export-csv \ - --format best-strategies --quarter Q3 --year 2025 - -# Export full performance data from quarterly reports -docker-compose run --rm quant python -m src.cli.unified_cli reports export-csv \ - --format quarterly --quarter Q3 --year 2025 +# Export CSV directly from DB for bonds (no backtests) +docker compose run --rm \ + quant python -m src.cli.unified_cli collection bonds --dry-run --exports csv -# Show available columns -docker-compose run --rm quant python -m src.cli.unified_cli reports export-csv \ - --columns available +# Export CSV + HTML report + TradingView alerts +docker compose run --rm \ + quant python -m src.cli.unified_cli collection bonds --dry-run --exports csv,report,tradingview,ai ``` --- ## 🎯 High Priority Features (Planned) -### 1. AI Investment Recommendations -**Status**: ✅ **IMPLEMENTED** -**Description**: AI-powered analysis of backtest results to recommend optimal asset allocation and investment decisions. 
- -**Features**: -- **Performance-based scoring** - Analyze Sortino ratio, Calmar ratio, and profit factors across all assets -- **Risk-adjusted recommendations** - Consider volatility, maximum drawdown, and recovery periods -- **Portfolio correlation analysis** - Identify diversification opportunities and avoid over-concentration -- **Strategy-asset matching** - Recommend best strategy-timeframe combinations for each asset -- **Investment allocation suggestions** - Propose percentage allocations based on risk tolerance -- **Red flag detection** - Warn against assets with poor historical performance or high risk -- **Confidence scoring** - Rate recommendation confidence based on data quality and consistency +### 1. Walk-Forward + Out-of-Sample Validation +- Rolling window backtests, expanding windows, and out-of-sample validation reports. +- Parameter stability plots; highlight overfitting risk. ### 2. Enhanced Data Sources **Status**: 🔄 **PLANNED** diff --git a/docs/pgadmin-and-performance.md b/docs/pgadmin-and-performance.md new file mode 100644 index 0000000..10a5913 --- /dev/null +++ b/docs/pgadmin-and-performance.md @@ -0,0 +1,81 @@ +# DB Inspection (pgAdmin) and Run Performance Tips + +## pgAdmin: Connect and Inspect + +- Login: open http://localhost:5050 and use `PGADMIN_DEFAULT_EMAIL` / `PGADMIN_DEFAULT_PASSWORD` from `.env`. 
+- Register server (first time):
+  - Name: `quant-local`
+  - Hostname/address: `postgres`
+  - Port: `5432`
+  - Maintenance DB: `quant_system`
+  - Username: `quantuser`
+  - Password: `quantpass`
+
+### Handy Queries
+
+- Recent runs (most recent first):
+```sql
+SELECT run_id, started_at_utc, action, collection_ref,
+       strategies_mode, intervals_mode, target_metric, period_mode,
+       status, plan_hash
+FROM runs
+ORDER BY started_at_utc DESC
+LIMIT 50;
+```
+
+- Find a run by plan_hash:
+```sql
+SELECT *
+FROM runs
+WHERE plan_hash = '<plan_hash>';
+```
+
+- Count backtest results per run:
+```sql
+SELECT run_id, COUNT(*) AS results
+FROM backtest_results
+GROUP BY run_id
+ORDER BY results DESC;
+```
+
+- Best strategies for 1d timeframe (top by Sortino):
+```sql
+SELECT symbol, timeframe, strategy,
+       COALESCE(sortino_ratio::float, 0) AS sortino_ratio,
+       COALESCE(total_return::float, 0) AS total_return,
+       COALESCE(max_drawdown::float, 0) AS max_drawdown,
+       updated_at
+FROM best_strategies
+WHERE timeframe = '1d'
+ORDER BY sortino_ratio DESC
+LIMIT 50;
+```
+
+- Latest results for a symbol (e.g., TLT):
+```sql
+SELECT symbol, strategy, interval, start_at_utc, end_at_utc, metrics, engine_ctx
+FROM backtest_results
+WHERE symbol = 'TLT'
+ORDER BY end_at_utc DESC NULLS LAST
+LIMIT 5;
+```
+
+## Speeding Up Runs
+
+- Limit strategies: pass `--strategies RSI,BollingerBands,Breakout` instead of `all`.
+- Limit symbols: create a small collection JSON (3–5 symbols) for iteration.
+- Fix interval: keep `--interval 1d` during development.
+- Concurrency: use `--max-workers 4` (or higher if CPU allows). Monitor memory.
+- Validate plan first: add `--dry-run` to print the manifest before running.
+- Re-run same plan: use `--force` if you need to execute a plan with the same `plan_hash` again.
+- Data/API constraints: provide API keys in `.env` to reduce throttling and widen history where providers allow.
+
+## Paths & Artifacts
+
+- Artifacts: `artifacts/run_<RUN_ID>/` (manifest, summaries, exports if enabled).
+- Exports: `exports/` (CSV, HTML reports, TradingView), organized by quarter in some flows.
+
+## Connections (psql)
+
+- Inside container: `DATABASE_URL=postgresql://quantuser:quantpass@postgres:5432/quant_system`
+- From host: `psql postgresql://quantuser:quantpass@localhost:5433/quant_system`
diff --git a/poetry.lock b/poetry.lock
index 79b64bd..aa8d470 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.

[[package]]
name = "annotated-types"
@@ -209,14 +209,14 @@ tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" a

[[package]]
name = "authlib"
-version = "1.6.1"
+version = "1.6.3"
description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
optional = false python-versions = ">=3.9" groups = ["security"] files = [ - {file = "authlib-1.6.1-py2.py3-none-any.whl", hash = "sha256:e9d2031c34c6309373ab845afc24168fe9e93dc52d252631f52642f21f5ed06e"}, - {file = "authlib-1.6.1.tar.gz", hash = "sha256:4dffdbb1460ba6ec8c17981a4c67af7d8af131231b5a36a88a1e8c80c111cdfd"}, + {file = "authlib-1.6.3-py2.py3-none-any.whl", hash = "sha256:7ea0f082edd95a03b7b72edac65ec7f8f68d703017d7e37573aee4fc603f2a48"}, + {file = "authlib-1.6.3.tar.gz", hash = "sha256:9f7a982cc395de719e4c2215c5707e7ea690ecf84f1ab126f28c053f4219e610"}, ] [package.dependencies] @@ -286,14 +286,14 @@ yaml = ["PyYAML"] [[package]] name = "beautifulsoup4" -version = "4.13.4" +version = "4.13.5" description = "Screen-scraping library" optional = false python-versions = ">=3.7.0" groups = ["main", "jupyter"] files = [ - {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"}, - {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"}, + {file = "beautifulsoup4-4.13.5-py3-none-any.whl", hash = "sha256:642085eaa22233aceadff9c69651bc51e8bf3f874fb6d7104ece2beb24b47c4a"}, + {file = "beautifulsoup4-4.13.5.tar.gz", hash = "sha256:5e70131382930e7c3de33450a2f54a63d5e4b19386eab43a5b34d594268f3695"}, ] [package.dependencies] @@ -457,104 +457,91 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.2" +version = "3.4.3" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7" groups = ["main", "jupyter", "security"] files = [ - {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, - {file = 
"charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = 
"sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, - {file = 
"charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", 
hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash 
= "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, - {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, - {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe"}, + {file = 
"charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"}, + {file = 
"charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884"}, + {file = 
"charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, + {file = 
"charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db"}, + {file = 
"charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", 
hash = "sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win32.whl", hash = "sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99"}, + {file = 
"charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca"}, + {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, + {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, ] [[package]] @@ -694,100 +681,100 @@ test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist" [[package]] name = "coverage" -version = "7.10.2" +version = "7.10.5" description = "Code coverage measurement for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "coverage-7.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:79f0283ab5e6499fd5fe382ca3d62afa40fb50ff227676a3125d18af70eabf65"}, - {file = "coverage-7.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4545e906f595ee8ab8e03e21be20d899bfc06647925bc5b224ad7e8c40e08b8"}, - {file = "coverage-7.10.2-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ae385e1d58fbc6a9b1c315e5510ac52281e271478b45f92ca9b5ad42cf39643f"}, - {file = "coverage-7.10.2-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6f0cbe5f7dd19f3a32bac2251b95d51c3b89621ac88a2648096ce40f9a5aa1e7"}, - {file = "coverage-7.10.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:fd17f427f041f6b116dc90b4049c6f3e1230524407d00daa2d8c7915037b5947"}, - {file = "coverage-7.10.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7f10ca4cde7b466405cce0a0e9971a13eb22e57a5ecc8b5f93a81090cc9c7eb9"}, - {file = "coverage-7.10.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3b990df23dd51dccce26d18fb09fd85a77ebe46368f387b0ffba7a74e470b31b"}, - {file = "coverage-7.10.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc3902584d25c7eef57fb38f440aa849a26a3a9f761a029a72b69acfca4e31f8"}, - {file = "coverage-7.10.2-cp310-cp310-win32.whl", hash = "sha256:9dd37e9ac00d5eb72f38ed93e3cdf2280b1dbda3bb9b48c6941805f265ad8d87"}, - {file = "coverage-7.10.2-cp310-cp310-win_amd64.whl", hash = "sha256:99d16f15cb5baf0729354c5bd3080ae53847a4072b9ba1e10957522fb290417f"}, - {file = "coverage-7.10.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c3b210d79925a476dfc8d74c7d53224888421edebf3a611f3adae923e212b27"}, - {file = "coverage-7.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bf67d1787cd317c3f8b2e4c6ed1ae93497be7e30605a0d32237ac37a37a8a322"}, - {file = "coverage-7.10.2-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:069b779d03d458602bc0e27189876e7d8bdf6b24ac0f12900de22dd2154e6ad7"}, - {file = 
"coverage-7.10.2-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4c2de4cb80b9990e71c62c2d3e9f3ec71b804b1f9ca4784ec7e74127e0f42468"}, - {file = "coverage-7.10.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:75bf7ab2374a7eb107602f1e07310cda164016cd60968abf817b7a0b5703e288"}, - {file = "coverage-7.10.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3f37516458ec1550815134937f73d6d15b434059cd10f64678a2068f65c62406"}, - {file = "coverage-7.10.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:de3c6271c482c250d3303fb5c6bdb8ca025fff20a67245e1425df04dc990ece9"}, - {file = "coverage-7.10.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:98a838101321ac3089c9bb1d4bfa967e8afed58021fda72d7880dc1997f20ae1"}, - {file = "coverage-7.10.2-cp311-cp311-win32.whl", hash = "sha256:f2a79145a531a0e42df32d37be5af069b4a914845b6f686590739b786f2f7bce"}, - {file = "coverage-7.10.2-cp311-cp311-win_amd64.whl", hash = "sha256:e4f5f1320f8ee0d7cfa421ceb257bef9d39fd614dd3ddcfcacd284d4824ed2c2"}, - {file = "coverage-7.10.2-cp311-cp311-win_arm64.whl", hash = "sha256:d8f2d83118f25328552c728b8e91babf93217db259ca5c2cd4dd4220b8926293"}, - {file = "coverage-7.10.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:890ad3a26da9ec7bf69255b9371800e2a8da9bc223ae5d86daeb940b42247c83"}, - {file = "coverage-7.10.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38fd1ccfca7838c031d7a7874d4353e2f1b98eb5d2a80a2fe5732d542ae25e9c"}, - {file = "coverage-7.10.2-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:76c1ffaaf4f6f0f6e8e9ca06f24bb6454a7a5d4ced97a1bc466f0d6baf4bd518"}, - {file = "coverage-7.10.2-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:86da8a3a84b79ead5c7d0e960c34f580bc3b231bb546627773a3f53c532c2f21"}, - {file = 
"coverage-7.10.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99cef9731c8a39801830a604cc53c93c9e57ea8b44953d26589499eded9576e0"}, - {file = "coverage-7.10.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ea58b112f2966a8b91eb13f5d3b1f8bb43c180d624cd3283fb33b1cedcc2dd75"}, - {file = "coverage-7.10.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:20f405188d28da9522b7232e51154e1b884fc18d0b3a10f382d54784715bbe01"}, - {file = "coverage-7.10.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:64586ce42bbe0da4d9f76f97235c545d1abb9b25985a8791857690f96e23dc3b"}, - {file = "coverage-7.10.2-cp312-cp312-win32.whl", hash = "sha256:bc2e69b795d97ee6d126e7e22e78a509438b46be6ff44f4dccbb5230f550d340"}, - {file = "coverage-7.10.2-cp312-cp312-win_amd64.whl", hash = "sha256:adda2268b8cf0d11f160fad3743b4dfe9813cd6ecf02c1d6397eceaa5b45b388"}, - {file = "coverage-7.10.2-cp312-cp312-win_arm64.whl", hash = "sha256:164429decd0d6b39a0582eaa30c67bf482612c0330572343042d0ed9e7f15c20"}, - {file = "coverage-7.10.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:aca7b5645afa688de6d4f8e89d30c577f62956fefb1bad021490d63173874186"}, - {file = "coverage-7.10.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:96e5921342574a14303dfdb73de0019e1ac041c863743c8fe1aa6c2b4a257226"}, - {file = "coverage-7.10.2-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:11333094c1bff621aa811b67ed794865cbcaa99984dedea4bd9cf780ad64ecba"}, - {file = "coverage-7.10.2-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6eb586fa7d2aee8d65d5ae1dd71414020b2f447435c57ee8de8abea0a77d5074"}, - {file = "coverage-7.10.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2d358f259d8019d4ef25d8c5b78aca4c7af25e28bd4231312911c22a0e824a57"}, - {file = "coverage-7.10.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:5250bda76e30382e0a2dcd68d961afcab92c3a7613606e6269855c6979a1b0bb"}, - {file = "coverage-7.10.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a91e027d66eff214d88d9afbe528e21c9ef1ecdf4956c46e366c50f3094696d0"}, - {file = "coverage-7.10.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:228946da741558904e2c03ce870ba5efd9cd6e48cbc004d9a27abee08100a15a"}, - {file = "coverage-7.10.2-cp313-cp313-win32.whl", hash = "sha256:95e23987b52d02e7c413bf2d6dc6288bd5721beb518052109a13bfdc62c8033b"}, - {file = "coverage-7.10.2-cp313-cp313-win_amd64.whl", hash = "sha256:f35481d42c6d146d48ec92d4e239c23f97b53a3f1fbd2302e7c64336f28641fe"}, - {file = "coverage-7.10.2-cp313-cp313-win_arm64.whl", hash = "sha256:65b451949cb789c346f9f9002441fc934d8ccedcc9ec09daabc2139ad13853f7"}, - {file = "coverage-7.10.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:e8415918856a3e7d57a4e0ad94651b761317de459eb74d34cc1bb51aad80f07e"}, - {file = "coverage-7.10.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f287a25a8ca53901c613498e4a40885b19361a2fe8fbfdbb7f8ef2cad2a23f03"}, - {file = "coverage-7.10.2-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:75cc1a3f8c88c69bf16a871dab1fe5a7303fdb1e9f285f204b60f1ee539b8fc0"}, - {file = "coverage-7.10.2-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ca07fa78cc9d26bc8c4740de1abd3489cf9c47cc06d9a8ab3d552ff5101af4c0"}, - {file = "coverage-7.10.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c2e117e64c26300032755d4520cd769f2623cde1a1d1c3515b05a3b8add0ade1"}, - {file = "coverage-7.10.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:daaf98009977f577b71f8800208f4d40d4dcf5c2db53d4d822787cdc198d76e1"}, - {file = "coverage-7.10.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:ea8d8fe546c528535c761ba424410bbeb36ba8a0f24be653e94b70c93fd8a8ca"}, - {file = 
"coverage-7.10.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fe024d40ac31eb8d5aae70215b41dafa264676caa4404ae155f77d2fa95c37bb"}, - {file = "coverage-7.10.2-cp313-cp313t-win32.whl", hash = "sha256:8f34b09f68bdadec122ffad312154eda965ade433559cc1eadd96cca3de5c824"}, - {file = "coverage-7.10.2-cp313-cp313t-win_amd64.whl", hash = "sha256:71d40b3ac0f26fa9ffa6ee16219a714fed5c6ec197cdcd2018904ab5e75bcfa3"}, - {file = "coverage-7.10.2-cp313-cp313t-win_arm64.whl", hash = "sha256:abb57fdd38bf6f7dcc66b38dafb7af7c5fdc31ac6029ce373a6f7f5331d6f60f"}, - {file = "coverage-7.10.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a3e853cc04987c85ec410905667eed4bf08b1d84d80dfab2684bb250ac8da4f6"}, - {file = "coverage-7.10.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0100b19f230df72c90fdb36db59d3f39232391e8d89616a7de30f677da4f532b"}, - {file = "coverage-7.10.2-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9c1cd71483ea78331bdfadb8dcec4f4edfb73c7002c1206d8e0af6797853f5be"}, - {file = "coverage-7.10.2-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9f75dbf4899e29a37d74f48342f29279391668ef625fdac6d2f67363518056a1"}, - {file = "coverage-7.10.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a7df481e7508de1c38b9b8043da48d94931aefa3e32b47dd20277e4978ed5b95"}, - {file = "coverage-7.10.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:835f39e618099325e7612b3406f57af30ab0a0af350490eff6421e2e5f608e46"}, - {file = "coverage-7.10.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:12e52b5aa00aa720097d6947d2eb9e404e7c1101ad775f9661ba165ed0a28303"}, - {file = "coverage-7.10.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:718044729bf1fe3e9eb9f31b52e44ddae07e434ec050c8c628bf5adc56fe4bdd"}, - {file = "coverage-7.10.2-cp314-cp314-win32.whl", hash = "sha256:f256173b48cc68486299d510a3e729a96e62c889703807482dbf56946befb5c8"}, - 
{file = "coverage-7.10.2-cp314-cp314-win_amd64.whl", hash = "sha256:2e980e4179f33d9b65ac4acb86c9c0dde904098853f27f289766657ed16e07b3"}, - {file = "coverage-7.10.2-cp314-cp314-win_arm64.whl", hash = "sha256:14fb5b6641ab5b3c4161572579f0f2ea8834f9d3af2f7dd8fbaecd58ef9175cc"}, - {file = "coverage-7.10.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e96649ac34a3d0e6491e82a2af71098e43be2874b619547c3282fc11d3840a4b"}, - {file = "coverage-7.10.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1a2e934e9da26341d342d30bfe91422bbfdb3f1f069ec87f19b2909d10d8dcc4"}, - {file = "coverage-7.10.2-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:651015dcd5fd9b5a51ca79ece60d353cacc5beaf304db750407b29c89f72fe2b"}, - {file = "coverage-7.10.2-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:81bf6a32212f9f66da03d63ecb9cd9bd48e662050a937db7199dbf47d19831de"}, - {file = "coverage-7.10.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d800705f6951f75a905ea6feb03fff8f3ea3468b81e7563373ddc29aa3e5d1ca"}, - {file = "coverage-7.10.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:248b5394718e10d067354448dc406d651709c6765669679311170da18e0e9af8"}, - {file = "coverage-7.10.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:5c61675a922b569137cf943770d7ad3edd0202d992ce53ac328c5ff68213ccf4"}, - {file = "coverage-7.10.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:52d708b5fd65589461381fa442d9905f5903d76c086c6a4108e8e9efdca7a7ed"}, - {file = "coverage-7.10.2-cp314-cp314t-win32.whl", hash = "sha256:916369b3b914186b2c5e5ad2f7264b02cff5df96cdd7cdad65dccd39aa5fd9f0"}, - {file = "coverage-7.10.2-cp314-cp314t-win_amd64.whl", hash = "sha256:5b9d538e8e04916a5df63052d698b30c74eb0174f2ca9cd942c981f274a18eaf"}, - {file = "coverage-7.10.2-cp314-cp314t-win_arm64.whl", hash = 
"sha256:04c74f9ef1f925456a9fd23a7eef1103126186d0500ef9a0acb0bd2514bdc7cc"}, - {file = "coverage-7.10.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:765b13b164685a2f8b2abef867ad07aebedc0e090c757958a186f64e39d63dbd"}, - {file = "coverage-7.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a219b70100500d0c7fd3ebb824a3302efb6b1a122baa9d4eb3f43df8f0b3d899"}, - {file = "coverage-7.10.2-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:e33e79a219105aa315439ee051bd50b6caa705dc4164a5aba6932c8ac3ce2d98"}, - {file = "coverage-7.10.2-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bc3945b7bad33957a9eca16e9e5eae4b17cb03173ef594fdaad228f4fc7da53b"}, - {file = "coverage-7.10.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9bdff88e858ee608a924acfad32a180d2bf6e13e059d6a7174abbae075f30436"}, - {file = "coverage-7.10.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44329cbed24966c0b49acb386352c9722219af1f0c80db7f218af7793d251902"}, - {file = "coverage-7.10.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:be127f292496d0fbe20d8025f73221b36117b3587f890346e80a13b310712982"}, - {file = "coverage-7.10.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6c031da749a05f7a01447dd7f47beedb498edd293e31e1878c0d52db18787df0"}, - {file = "coverage-7.10.2-cp39-cp39-win32.whl", hash = "sha256:22aca3e691c7709c5999ccf48b7a8ff5cf5a8bd6fe9b36efbd4993f5a36b2fcf"}, - {file = "coverage-7.10.2-cp39-cp39-win_amd64.whl", hash = "sha256:c7195444b932356055a8e287fa910bf9753a84a1bc33aeb3770e8fca521e032e"}, - {file = "coverage-7.10.2-py3-none-any.whl", hash = "sha256:95db3750dd2e6e93d99fa2498f3a1580581e49c494bddccc6f85c5c21604921f"}, - {file = "coverage-7.10.2.tar.gz", hash = "sha256:5d6e6d84e6dd31a8ded64759626627247d676a23c1b892e1326f7c55c8d61055"}, + {file = "coverage-7.10.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:c6a5c3414bfc7451b879141ce772c546985163cf553f08e0f135f0699a911801"}, + {file = "coverage-7.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bc8e4d99ce82f1710cc3c125adc30fd1487d3cf6c2cd4994d78d68a47b16989a"}, + {file = "coverage-7.10.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:02252dc1216e512a9311f596b3169fad54abcb13827a8d76d5630c798a50a754"}, + {file = "coverage-7.10.5-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73269df37883e02d460bee0cc16be90509faea1e3bd105d77360b512d5bb9c33"}, + {file = "coverage-7.10.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f8a81b0614642f91c9effd53eec284f965577591f51f547a1cbeb32035b4c2f"}, + {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6a29f8e0adb7f8c2b95fa2d4566a1d6e6722e0a637634c6563cb1ab844427dd9"}, + {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fcf6ab569436b4a647d4e91accba12509ad9f2554bc93d3aee23cc596e7f99c3"}, + {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:90dc3d6fb222b194a5de60af8d190bedeeddcbc7add317e4a3cd333ee6b7c879"}, + {file = "coverage-7.10.5-cp310-cp310-win32.whl", hash = "sha256:414a568cd545f9dc75f0686a0049393de8098414b58ea071e03395505b73d7a8"}, + {file = "coverage-7.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:e551f9d03347196271935fd3c0c165f0e8c049220280c1120de0084d65e9c7ff"}, + {file = "coverage-7.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c177e6ffe2ebc7c410785307758ee21258aa8e8092b44d09a2da767834f075f2"}, + {file = "coverage-7.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:14d6071c51ad0f703d6440827eaa46386169b5fdced42631d5a5ac419616046f"}, + {file = "coverage-7.10.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:61f78c7c3bc272a410c5ae3fde7792b4ffb4acc03d35a7df73ca8978826bb7ab"}, + {file = 
"coverage-7.10.5-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f39071caa126f69d63f99b324fb08c7b1da2ec28cbb1fe7b5b1799926492f65c"}, + {file = "coverage-7.10.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:343a023193f04d46edc46b2616cdbee68c94dd10208ecd3adc56fcc54ef2baa1"}, + {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:585ffe93ae5894d1ebdee69fc0b0d4b7c75d8007983692fb300ac98eed146f78"}, + {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0ef4e66f006ed181df29b59921bd8fc7ed7cd6a9289295cd8b2824b49b570df"}, + {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eb7b0bbf7cc1d0453b843eca7b5fa017874735bef9bfdfa4121373d2cc885ed6"}, + {file = "coverage-7.10.5-cp311-cp311-win32.whl", hash = "sha256:1d043a8a06987cc0c98516e57c4d3fc2c1591364831e9deb59c9e1b4937e8caf"}, + {file = "coverage-7.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:fefafcca09c3ac56372ef64a40f5fe17c5592fab906e0fdffd09543f3012ba50"}, + {file = "coverage-7.10.5-cp311-cp311-win_arm64.whl", hash = "sha256:7e78b767da8b5fc5b2faa69bb001edafcd6f3995b42a331c53ef9572c55ceb82"}, + {file = "coverage-7.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2d05c7e73c60a4cecc7d9b60dbfd603b4ebc0adafaef371445b47d0f805c8a9"}, + {file = "coverage-7.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32ddaa3b2c509778ed5373b177eb2bf5662405493baeff52278a0b4f9415188b"}, + {file = "coverage-7.10.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dd382410039fe062097aa0292ab6335a3f1e7af7bba2ef8d27dcda484918f20c"}, + {file = "coverage-7.10.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7fa22800f3908df31cea6fb230f20ac49e343515d968cc3a42b30d5c3ebf9b5a"}, + {file = 
"coverage-7.10.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f366a57ac81f5e12797136552f5b7502fa053c861a009b91b80ed51f2ce651c6"}, + {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1dc8f1980a272ad4a6c84cba7981792344dad33bf5869361576b7aef42733a"}, + {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2285c04ee8676f7938b02b4936d9b9b672064daab3187c20f73a55f3d70e6b4a"}, + {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c2492e4dd9daab63f5f56286f8a04c51323d237631eb98505d87e4c4ff19ec34"}, + {file = "coverage-7.10.5-cp312-cp312-win32.whl", hash = "sha256:38a9109c4ee8135d5df5505384fc2f20287a47ccbe0b3f04c53c9a1989c2bbaf"}, + {file = "coverage-7.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:6b87f1ad60b30bc3c43c66afa7db6b22a3109902e28c5094957626a0143a001f"}, + {file = "coverage-7.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:672a6c1da5aea6c629819a0e1461e89d244f78d7b60c424ecf4f1f2556c041d8"}, + {file = "coverage-7.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ef3b83594d933020f54cf65ea1f4405d1f4e41a009c46df629dd964fcb6e907c"}, + {file = "coverage-7.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2b96bfdf7c0ea9faebce088a3ecb2382819da4fbc05c7b80040dbc428df6af44"}, + {file = "coverage-7.10.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:63df1fdaffa42d914d5c4d293e838937638bf75c794cf20bee12978fc8c4e3bc"}, + {file = "coverage-7.10.5-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8002dc6a049aac0e81ecec97abfb08c01ef0c1fbf962d0c98da3950ace89b869"}, + {file = "coverage-7.10.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63d4bb2966d6f5f705a6b0c6784c8969c468dbc4bcf9d9ded8bff1c7e092451f"}, + {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:1f672efc0731a6846b157389b6e6d5d5e9e59d1d1a23a5c66a99fd58339914d5"}, + {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3f39cef43d08049e8afc1fde4a5da8510fc6be843f8dea350ee46e2a26b2f54c"}, + {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2968647e3ed5a6c019a419264386b013979ff1fb67dd11f5c9886c43d6a31fc2"}, + {file = "coverage-7.10.5-cp313-cp313-win32.whl", hash = "sha256:0d511dda38595b2b6934c2b730a1fd57a3635c6aa2a04cb74714cdfdd53846f4"}, + {file = "coverage-7.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:9a86281794a393513cf117177fd39c796b3f8e3759bb2764259a2abba5cce54b"}, + {file = "coverage-7.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:cebd8e906eb98bb09c10d1feed16096700b1198d482267f8bf0474e63a7b8d84"}, + {file = "coverage-7.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0520dff502da5e09d0d20781df74d8189ab334a1e40d5bafe2efaa4158e2d9e7"}, + {file = "coverage-7.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d9cd64aca68f503ed3f1f18c7c9174cbb797baba02ca8ab5112f9d1c0328cd4b"}, + {file = "coverage-7.10.5-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0913dd1613a33b13c4f84aa6e3f4198c1a21ee28ccb4f674985c1f22109f0aae"}, + {file = "coverage-7.10.5-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1b7181c0feeb06ed8a02da02792f42f829a7b29990fef52eff257fef0885d760"}, + {file = "coverage-7.10.5-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36d42b7396b605f774d4372dd9c49bed71cbabce4ae1ccd074d155709dd8f235"}, + {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b4fdc777e05c4940b297bf47bf7eedd56a39a61dc23ba798e4b830d585486ca5"}, + {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:42144e8e346de44a6f1dbd0a56575dd8ab8dfa7e9007da02ea5b1c30ab33a7db"}, + {file = 
"coverage-7.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:66c644cbd7aed8fe266d5917e2c9f65458a51cfe5eeff9c05f15b335f697066e"}, + {file = "coverage-7.10.5-cp313-cp313t-win32.whl", hash = "sha256:2d1b73023854068c44b0c554578a4e1ef1b050ed07cf8b431549e624a29a66ee"}, + {file = "coverage-7.10.5-cp313-cp313t-win_amd64.whl", hash = "sha256:54a1532c8a642d8cc0bd5a9a51f5a9dcc440294fd06e9dda55e743c5ec1a8f14"}, + {file = "coverage-7.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:74d5b63fe3f5f5d372253a4ef92492c11a4305f3550631beaa432fc9df16fcff"}, + {file = "coverage-7.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:68c5e0bc5f44f68053369fa0d94459c84548a77660a5f2561c5e5f1e3bed7031"}, + {file = "coverage-7.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cf33134ffae93865e32e1e37df043bef15a5e857d8caebc0099d225c579b0fa3"}, + {file = "coverage-7.10.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad8fa9d5193bafcf668231294241302b5e683a0518bf1e33a9a0dfb142ec3031"}, + {file = "coverage-7.10.5-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:146fa1531973d38ab4b689bc764592fe6c2f913e7e80a39e7eeafd11f0ef6db2"}, + {file = "coverage-7.10.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6013a37b8a4854c478d3219ee8bc2392dea51602dd0803a12d6f6182a0061762"}, + {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:eb90fe20db9c3d930fa2ad7a308207ab5b86bf6a76f54ab6a40be4012d88fcae"}, + {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:384b34482272e960c438703cafe63316dfbea124ac62006a455c8410bf2a2262"}, + {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:467dc74bd0a1a7de2bedf8deaf6811f43602cb532bd34d81ffd6038d6d8abe99"}, + {file = "coverage-7.10.5-cp314-cp314-win32.whl", hash = "sha256:556d23d4e6393ca898b2e63a5bca91e9ac2d5fb13299ec286cd69a09a7187fde"}, + 
{file = "coverage-7.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:f4446a9547681533c8fa3e3c6cf62121eeee616e6a92bd9201c6edd91beffe13"}, + {file = "coverage-7.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:5e78bd9cf65da4c303bf663de0d73bf69f81e878bf72a94e9af67137c69b9fe9"}, + {file = "coverage-7.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5661bf987d91ec756a47c7e5df4fbcb949f39e32f9334ccd3f43233bbb65e508"}, + {file = "coverage-7.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a46473129244db42a720439a26984f8c6f834762fc4573616c1f37f13994b357"}, + {file = "coverage-7.10.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1f64b8d3415d60f24b058b58d859e9512624bdfa57a2d1f8aff93c1ec45c429b"}, + {file = "coverage-7.10.5-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:44d43de99a9d90b20e0163f9770542357f58860a26e24dc1d924643bd6aa7cb4"}, + {file = "coverage-7.10.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a931a87e5ddb6b6404e65443b742cb1c14959622777f2a4efd81fba84f5d91ba"}, + {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9559b906a100029274448f4c8b8b0a127daa4dade5661dfd821b8c188058842"}, + {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b08801e25e3b4526ef9ced1aa29344131a8f5213c60c03c18fe4c6170ffa2874"}, + {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ed9749bb8eda35f8b636fb7632f1c62f735a236a5d4edadd8bbcc5ea0542e732"}, + {file = "coverage-7.10.5-cp314-cp314t-win32.whl", hash = "sha256:609b60d123fc2cc63ccee6d17e4676699075db72d14ac3c107cc4976d516f2df"}, + {file = "coverage-7.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:0666cf3d2c1626b5a3463fd5b05f5e21f99e6aec40a3192eee4d07a15970b07f"}, + {file = "coverage-7.10.5-cp314-cp314t-win_arm64.whl", hash = 
"sha256:bc85eb2d35e760120540afddd3044a5bf69118a91a296a8b3940dfc4fdcfe1e2"}, + {file = "coverage-7.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:62835c1b00c4a4ace24c1a88561a5a59b612fbb83a525d1c70ff5720c97c0610"}, + {file = "coverage-7.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5255b3bbcc1d32a4069d6403820ac8e6dbcc1d68cb28a60a1ebf17e47028e898"}, + {file = "coverage-7.10.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3876385722e335d6e991c430302c24251ef9c2a9701b2b390f5473199b1b8ebf"}, + {file = "coverage-7.10.5-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8048ce4b149c93447a55d279078c8ae98b08a6951a3c4d2d7e87f4efc7bfe100"}, + {file = "coverage-7.10.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4028e7558e268dd8bcf4d9484aad393cafa654c24b4885f6f9474bf53183a82a"}, + {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03f47dc870eec0367fcdd603ca6a01517d2504e83dc18dbfafae37faec66129a"}, + {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2d488d7d42b6ded7ea0704884f89dcabd2619505457de8fc9a6011c62106f6e5"}, + {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b3dcf2ead47fa8be14224ee817dfc1df98043af568fe120a22f81c0eb3c34ad2"}, + {file = "coverage-7.10.5-cp39-cp39-win32.whl", hash = "sha256:02650a11324b80057b8c9c29487020073d5e98a498f1857f37e3f9b6ea1b2426"}, + {file = "coverage-7.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:b45264dd450a10f9e03237b41a9a24e85cbb1e278e5a32adb1a303f58f0017f3"}, + {file = "coverage-7.10.5-py3-none-any.whl", hash = "sha256:0be24d35e4db1d23d0db5c0f6a74a962e2ec83c426b5cac09f4234aadef38e4a"}, + {file = "coverage-7.10.5.tar.gz", hash = "sha256:f2e57716a78bc3ae80b2207be0709a3b2b63b9f2dcf9740ee6ac03588a2015b6"}, ] [package.extras] @@ -992,6 +979,29 @@ conda = ["pyyaml"] pipenv = ["pipenv"] poetry = ["poetry"] +[[package]] 
+name = "exchange-calendars" +version = "4.11.1" +description = "Calendars for securities exchanges" +optional = false +python-versions = "~=3.10" +groups = ["main"] +files = [ + {file = "exchange_calendars-4.11.1-py3-none-any.whl", hash = "sha256:40ec771589e5a9b96b9e09667cd0f3fde7c70444e3a7530b8989ebd0750ee478"}, + {file = "exchange_calendars-4.11.1.tar.gz", hash = "sha256:bdaf000c3c5a0087341e1fdfe063182d27585bdba3f1a3d0189a13bdb4afea5d"}, +] + +[package.dependencies] +korean_lunar_calendar = "*" +numpy = "*" +pandas = "*" +pyluach = "*" +toolz = "*" +tzdata = "*" + +[package.extras] +dev = ["flake8", "hypothesis", "pip-tools", "pytest", "pytest-benchmark", "pytest-xdist"] + [[package]] name = "executing" version = "2.2.0" @@ -1046,71 +1056,87 @@ devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benc [[package]] name = "filelock" -version = "3.18.0" +version = "3.12.4" description = "A platform independent file lock." optional = false -python-versions = ">=3.9" -groups = ["dev"] +python-versions = ">=3.8" +groups = ["dev", "security"] files = [ - {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, - {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, + {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"}, + {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"}, ] [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] +docs = 
["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"] +typing = ["typing-extensions (>=4.7.1) ; python_version < \"3.11\""] [[package]] name = "fonttools" -version = "4.59.0" +version = "4.59.2" description = "Tools to manipulate font files" optional = false python-versions = ">=3.9" groups = ["jupyter"] files = [ - {file = "fonttools-4.59.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:524133c1be38445c5c0575eacea42dbd44374b310b1ffc4b60ff01d881fabb96"}, - {file = "fonttools-4.59.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21e606b2d38fed938dde871c5736822dd6bda7a4631b92e509a1f5cd1b90c5df"}, - {file = "fonttools-4.59.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e93df708c69a193fc7987192f94df250f83f3851fda49413f02ba5dded639482"}, - {file = "fonttools-4.59.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:62224a9bb85b4b66d1b46d45cbe43d71cbf8f527d332b177e3b96191ffbc1e64"}, - {file = "fonttools-4.59.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8974b2a266b54c96709bd5e239979cddfd2dbceed331aa567ea1d7c4a2202db"}, - {file = "fonttools-4.59.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:209b75943d158f610b78320eacb5539aa9e920bee2c775445b2846c65d20e19d"}, - {file = "fonttools-4.59.0-cp310-cp310-win32.whl", hash = "sha256:4c908a7036f0f3677f8afa577bcd973e3e20ddd2f7c42a33208d18bee95cdb6f"}, - {file = "fonttools-4.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:8b4309a2775e4feee7356e63b163969a215d663399cce1b3d3b65e7ec2d9680e"}, - {file = "fonttools-4.59.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:841b2186adce48903c0fef235421ae21549020eca942c1da773ac380b056ab3c"}, - {file = "fonttools-4.59.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:9bcc1e77fbd1609198966ded6b2a9897bd6c6bcbd2287a2fc7d75f1a254179c5"}, - {file = "fonttools-4.59.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:37c377f7cb2ab2eca8a0b319c68146d34a339792f9420fca6cd49cf28d370705"}, - {file = "fonttools-4.59.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa39475eaccb98f9199eccfda4298abaf35ae0caec676ffc25b3a5e224044464"}, - {file = "fonttools-4.59.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d3972b13148c1d1fbc092b27678a33b3080d1ac0ca305742b0119b75f9e87e38"}, - {file = "fonttools-4.59.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a408c3c51358c89b29cfa5317cf11518b7ce5de1717abb55c5ae2d2921027de6"}, - {file = "fonttools-4.59.0-cp311-cp311-win32.whl", hash = "sha256:6770d7da00f358183d8fd5c4615436189e4f683bdb6affb02cad3d221d7bb757"}, - {file = "fonttools-4.59.0-cp311-cp311-win_amd64.whl", hash = "sha256:84fc186980231a287b28560d3123bd255d3c6b6659828c642b4cf961e2b923d0"}, - {file = "fonttools-4.59.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f9b3a78f69dcbd803cf2fb3f972779875b244c1115481dfbdd567b2c22b31f6b"}, - {file = "fonttools-4.59.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:57bb7e26928573ee7c6504f54c05860d867fd35e675769f3ce01b52af38d48e2"}, - {file = "fonttools-4.59.0-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:4536f2695fe5c1ffb528d84a35a7d3967e5558d2af58b4775e7ab1449d65767b"}, - {file = "fonttools-4.59.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:885bde7d26e5b40e15c47bd5def48b38cbd50830a65f98122a8fb90962af7cd1"}, - {file = "fonttools-4.59.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6801aeddb6acb2c42eafa45bc1cb98ba236871ae6f33f31e984670b749a8e58e"}, - {file = "fonttools-4.59.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:31003b6a10f70742a63126b80863ab48175fb8272a18ca0846c0482968f0588e"}, - {file = "fonttools-4.59.0-cp312-cp312-win32.whl", hash = "sha256:fbce6dae41b692a5973d0f2158f782b9ad05babc2c2019a970a1094a23909b1b"}, - {file = "fonttools-4.59.0-cp312-cp312-win_amd64.whl", hash = "sha256:332bfe685d1ac58ca8d62b8d6c71c2e52a6c64bc218dc8f7825c9ea51385aa01"}, - {file = "fonttools-4.59.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:78813b49d749e1bb4db1c57f2d4d7e6db22c253cb0a86ad819f5dc197710d4b2"}, - {file = "fonttools-4.59.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:401b1941ce37e78b8fd119b419b617277c65ae9417742a63282257434fd68ea2"}, - {file = "fonttools-4.59.0-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:efd7e6660674e234e29937bc1481dceb7e0336bfae75b856b4fb272b5093c5d4"}, - {file = "fonttools-4.59.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51ab1ff33c19e336c02dee1e9fd1abd974a4ca3d8f7eef2a104d0816a241ce97"}, - {file = "fonttools-4.59.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a9bf8adc9e1f3012edc8f09b08336272aec0c55bc677422273e21280db748f7c"}, - {file = "fonttools-4.59.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:37e01c6ec0c98599778c2e688350d624fa4770fbd6144551bd5e032f1199171c"}, - {file = "fonttools-4.59.0-cp313-cp313-win32.whl", hash = "sha256:70d6b3ceaa9cc5a6ac52884f3b3d9544e8e231e95b23f138bdb78e6d4dc0eae3"}, - {file = "fonttools-4.59.0-cp313-cp313-win_amd64.whl", hash = "sha256:26731739daa23b872643f0e4072d5939960237d540c35c14e6a06d47d71ca8fe"}, - {file = "fonttools-4.59.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8d77f92438daeaddc05682f0f3dac90c5b9829bcac75b57e8ce09cb67786073c"}, - {file = "fonttools-4.59.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:60f6665579e909b618282f3c14fa0b80570fbf1ee0e67678b9a9d43aa5d67a37"}, - {file = 
"fonttools-4.59.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:169b99a2553a227f7b5fea8d9ecd673aa258617f466b2abc6091fe4512a0dcd0"}, - {file = "fonttools-4.59.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:052444a5d0151878e87e3e512a1aa1a0ab35ee4c28afde0a778e23b0ace4a7de"}, - {file = "fonttools-4.59.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d40dcf533ca481355aa7b682e9e079f766f35715defa4929aeb5597f9604272e"}, - {file = "fonttools-4.59.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b818db35879d2edf7f46c7e729c700a0bce03b61b9412f5a7118406687cb151d"}, - {file = "fonttools-4.59.0-cp39-cp39-win32.whl", hash = "sha256:2e7cf8044ce2598bb87e44ba1d2c6e45d7a8decf56055b92906dc53f67c76d64"}, - {file = "fonttools-4.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:902425f5afe28572d65d2bf9c33edd5265c612ff82c69e6f83ea13eafc0dcbea"}, - {file = "fonttools-4.59.0-py3-none-any.whl", hash = "sha256:241313683afd3baacb32a6bd124d0bce7404bc5280e12e291bae1b9bba28711d"}, - {file = "fonttools-4.59.0.tar.gz", hash = "sha256:be392ec3529e2f57faa28709d60723a763904f71a2b63aabe14fee6648fe3b14"}, + {file = "fonttools-4.59.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2a159e36ae530650acd13604f364b3a2477eff7408dcac6a640d74a3744d2514"}, + {file = "fonttools-4.59.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8bd733e47bf4c6dee2b2d8af7a1f7b0c091909b22dbb969a29b2b991e61e5ba4"}, + {file = "fonttools-4.59.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7bb32e0e33795e3b7795bb9b88cb6a9d980d3cbe26dd57642471be547708e17a"}, + {file = "fonttools-4.59.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cdcdf7aad4bab7fd0f2938624a5a84eb4893be269f43a6701b0720b726f24df0"}, + {file = "fonttools-4.59.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4d974312a9f405628e64f475b1f5015a61fd338f0a1b61d15c4822f97d6b045b"}, + {file 
= "fonttools-4.59.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:12dc4670e6e6cc4553e8de190f86a549e08ca83a036363115d94a2d67488831e"}, + {file = "fonttools-4.59.2-cp310-cp310-win32.whl", hash = "sha256:1603b85d5922042563eea518e272b037baf273b9a57d0f190852b0b075079000"}, + {file = "fonttools-4.59.2-cp310-cp310-win_amd64.whl", hash = "sha256:2543b81641ea5b8ddfcae7926e62aafd5abc604320b1b119e5218c014a7a5d3c"}, + {file = "fonttools-4.59.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:511946e8d7ea5c0d6c7a53c4cb3ee48eda9ab9797cd9bf5d95829a398400354f"}, + {file = "fonttools-4.59.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8e5e2682cf7be766d84f462ba8828d01e00c8751a8e8e7ce12d7784ccb69a30d"}, + {file = "fonttools-4.59.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5729e12a982dba3eeae650de48b06f3b9ddb51e9aee2fcaf195b7d09a96250e2"}, + {file = "fonttools-4.59.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c52694eae5d652361d59ecdb5a2246bff7cff13b6367a12da8499e9df56d148d"}, + {file = "fonttools-4.59.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f1f1bbc23ba1312bd8959896f46f667753b90216852d2a8cfa2d07e0cb234144"}, + {file = "fonttools-4.59.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1a1bfe5378962825dabe741720885e8b9ae9745ec7ecc4a5ec1f1ce59a6062bf"}, + {file = "fonttools-4.59.2-cp311-cp311-win32.whl", hash = "sha256:e937790f3c2c18a1cbc7da101550a84319eb48023a715914477d2e7faeaba570"}, + {file = "fonttools-4.59.2-cp311-cp311-win_amd64.whl", hash = "sha256:9836394e2f4ce5f9c0a7690ee93bd90aa1adc6b054f1a57b562c5d242c903104"}, + {file = "fonttools-4.59.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:82906d002c349cad647a7634b004825a7335f8159d0d035ae89253b4abf6f3ea"}, + {file = "fonttools-4.59.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a10c1bd7644dc58f8862d8ba0cf9fb7fef0af01ea184ba6ce3f50ab7dfe74d5a"}, + {file = 
"fonttools-4.59.2-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:738f31f23e0339785fd67652a94bc69ea49e413dfdb14dcb8c8ff383d249464e"}, + {file = "fonttools-4.59.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ec99f9bdfee9cdb4a9172f9e8fd578cce5feb231f598909e0aecf5418da4f25"}, + {file = "fonttools-4.59.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0476ea74161322e08c7a982f83558a2b81b491509984523a1a540baf8611cc31"}, + {file = "fonttools-4.59.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:95922a922daa1f77cc72611747c156cfb38030ead72436a2c551d30ecef519b9"}, + {file = "fonttools-4.59.2-cp312-cp312-win32.whl", hash = "sha256:39ad9612c6a622726a6a130e8ab15794558591f999673f1ee7d2f3d30f6a3e1c"}, + {file = "fonttools-4.59.2-cp312-cp312-win_amd64.whl", hash = "sha256:980fd7388e461b19a881d35013fec32c713ffea1fc37aef2f77d11f332dfd7da"}, + {file = "fonttools-4.59.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:381bde13216ba09489864467f6bc0c57997bd729abfbb1ce6f807ba42c06cceb"}, + {file = "fonttools-4.59.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f33839aa091f7eef4e9078f5b7ab1b8ea4b1d8a50aeaef9fdb3611bba80869ec"}, + {file = "fonttools-4.59.2-cp313-cp313-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6235fc06bcbdb40186f483ba9d5d68f888ea68aa3c8dac347e05a7c54346fbc8"}, + {file = "fonttools-4.59.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:83ad6e5d06ef3a2884c4fa6384a20d6367b5cfe560e3b53b07c9dc65a7020e73"}, + {file = "fonttools-4.59.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d029804c70fddf90be46ed5305c136cae15800a2300cb0f6bba96d48e770dde0"}, + {file = "fonttools-4.59.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:95807a3b5e78f2714acaa26a33bc2143005cc05c0217b322361a772e59f32b89"}, + {file = 
"fonttools-4.59.2-cp313-cp313-win32.whl", hash = "sha256:b3ebda00c3bb8f32a740b72ec38537d54c7c09f383a4cfefb0b315860f825b08"}, + {file = "fonttools-4.59.2-cp313-cp313-win_amd64.whl", hash = "sha256:a72155928d7053bbde499d32a9c77d3f0f3d29ae72b5a121752481bcbd71e50f"}, + {file = "fonttools-4.59.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:d09e487d6bfbe21195801323ba95c91cb3523f0fcc34016454d4d9ae9eaa57fe"}, + {file = "fonttools-4.59.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:dec2f22486d7781087b173799567cffdcc75e9fb2f1c045f05f8317ccce76a3e"}, + {file = "fonttools-4.59.2-cp314-cp314-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1647201af10993090120da2e66e9526c4e20e88859f3e34aa05b8c24ded2a564"}, + {file = "fonttools-4.59.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:47742c33fe65f41eabed36eec2d7313a8082704b7b808752406452f766c573fc"}, + {file = "fonttools-4.59.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:92ac2d45794f95d1ad4cb43fa07e7e3776d86c83dc4b9918cf82831518165b4b"}, + {file = "fonttools-4.59.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:fa9ecaf2dcef8941fb5719e16322345d730f4c40599bbf47c9753de40eb03882"}, + {file = "fonttools-4.59.2-cp314-cp314-win32.whl", hash = "sha256:a8d40594982ed858780e18a7e4c80415af65af0f22efa7de26bdd30bf24e1e14"}, + {file = "fonttools-4.59.2-cp314-cp314-win_amd64.whl", hash = "sha256:9cde8b6a6b05f68516573523f2013a3574cb2c75299d7d500f44de82ba947b80"}, + {file = "fonttools-4.59.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:036cd87a2dbd7ef72f7b68df8314ced00b8d9973aee296f2464d06a836aeb9a9"}, + {file = "fonttools-4.59.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:14870930181493b1d740b6f25483e20185e5aea58aec7d266d16da7be822b4bb"}, + {file = "fonttools-4.59.2-cp314-cp314t-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = 
"sha256:7ff58ea1eb8fc7e05e9a949419f031890023f8785c925b44d6da17a6a7d6e85d"}, + {file = "fonttools-4.59.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6dee142b8b3096514c96ad9e2106bf039e2fe34a704c587585b569a36df08c3c"}, + {file = "fonttools-4.59.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8991bdbae39cf78bcc9cd3d81f6528df1f83f2e7c23ccf6f990fa1f0b6e19708"}, + {file = "fonttools-4.59.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:53c1a411b7690042535a4f0edf2120096a39a506adeb6c51484a232e59f2aa0c"}, + {file = "fonttools-4.59.2-cp314-cp314t-win32.whl", hash = "sha256:59d85088e29fa7a8f87d19e97a1beae2a35821ee48d8ef6d2c4f965f26cb9f8a"}, + {file = "fonttools-4.59.2-cp314-cp314t-win_amd64.whl", hash = "sha256:7ad5d8d8cc9e43cb438b3eb4a0094dd6d4088daa767b0a24d52529361fd4c199"}, + {file = "fonttools-4.59.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3cdf9d32690f0e235342055f0a6108eedfccf67b213b033bac747eb809809513"}, + {file = "fonttools-4.59.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:67f9640d6b31d66c0bc54bdbe8ed50983c755521c101576a25e377a8711e8207"}, + {file = "fonttools-4.59.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464d15b58a9fd4304c728735fc1d42cd812fd9ebc27c45b18e78418efd337c28"}, + {file = "fonttools-4.59.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a039c38d5644c691eb53cd65360921338f54e44c90b4e764605711e046c926ee"}, + {file = "fonttools-4.59.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e4f5100e66ec307cce8b52fc03e379b5d1596e9cb8d8b19dfeeccc1e68d86c96"}, + {file = "fonttools-4.59.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:af6dbd463a3530256abf21f675ddf87646272bc48901803a185c49d06287fbf1"}, + {file = "fonttools-4.59.2-cp39-cp39-win32.whl", hash = "sha256:594a6fd2f8296583ac7babc4880c8deee7c4f05ab0141addc6bce8b8e367e996"}, + {file = "fonttools-4.59.2-cp39-cp39-win_amd64.whl", 
hash = "sha256:fc21c4a05226fd39715f66c1c28214862474db50df9f08fd1aa2f96698887bc3"}, + {file = "fonttools-4.59.2-py3-none-any.whl", hash = "sha256:8bd0f759020e87bb5d323e6283914d9bf4ae35a7307dafb2cbd1e379e720ad37"}, + {file = "fonttools-4.59.2.tar.gz", hash = "sha256:e72c0749b06113f50bcb80332364c6be83a9582d6e3db3fe0b280f996dc2ef22"}, ] [package.extras] @@ -1189,72 +1215,72 @@ files = [ [[package]] name = "greenlet" -version = "3.2.3" +version = "3.2.4" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.9" groups = ["main"] markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ - {file = "greenlet-3.2.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:1afd685acd5597349ee6d7a88a8bec83ce13c106ac78c196ee9dde7c04fe87be"}, - {file = "greenlet-3.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:761917cac215c61e9dc7324b2606107b3b292a8349bdebb31503ab4de3f559ac"}, - {file = "greenlet-3.2.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a433dbc54e4a37e4fff90ef34f25a8c00aed99b06856f0119dcf09fbafa16392"}, - {file = "greenlet-3.2.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:72e77ed69312bab0434d7292316d5afd6896192ac4327d44f3d613ecb85b037c"}, - {file = "greenlet-3.2.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:68671180e3849b963649254a882cd544a3c75bfcd2c527346ad8bb53494444db"}, - {file = "greenlet-3.2.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:49c8cfb18fb419b3d08e011228ef8a25882397f3a859b9fe1436946140b6756b"}, - {file = "greenlet-3.2.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:efc6dc8a792243c31f2f5674b670b3a95d46fa1c6a912b8e310d6f542e7b0712"}, - {file = "greenlet-3.2.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:731e154aba8e757aedd0781d4b240f1225b075b4409f1bb83b05ff410582cf00"}, - {file = "greenlet-3.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:96c20252c2f792defe9a115d3287e14811036d51e78b3aaddbee23b69b216302"}, - {file = "greenlet-3.2.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:784ae58bba89fa1fa5733d170d42486580cab9decda3484779f4759345b29822"}, - {file = "greenlet-3.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0921ac4ea42a5315d3446120ad48f90c3a6b9bb93dd9b3cf4e4d84a66e42de83"}, - {file = "greenlet-3.2.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d2971d93bb99e05f8c2c0c2f4aa9484a18d98c4c3bd3c62b65b7e6ae33dfcfaf"}, - {file = "greenlet-3.2.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c667c0bf9d406b77a15c924ef3285e1e05250948001220368e039b6aa5b5034b"}, - {file = "greenlet-3.2.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:592c12fb1165be74592f5de0d70f82bc5ba552ac44800d632214b76089945147"}, - {file = "greenlet-3.2.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:29e184536ba333003540790ba29829ac14bb645514fbd7e32af331e8202a62a5"}, - {file = "greenlet-3.2.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:93c0bb79844a367782ec4f429d07589417052e621aa39a5ac1fb99c5aa308edc"}, - {file = "greenlet-3.2.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:751261fc5ad7b6705f5f76726567375bb2104a059454e0226e1eef6c756748ba"}, - {file = "greenlet-3.2.3-cp311-cp311-win_amd64.whl", hash = "sha256:83a8761c75312361aa2b5b903b79da97f13f556164a7dd2d5448655425bd4c34"}, - {file = "greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d"}, - {file = 
"greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b"}, - {file = "greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d"}, - {file = "greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264"}, - {file = "greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688"}, - {file = "greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb"}, - {file = "greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c"}, - {file = "greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163"}, - {file = "greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849"}, - {file = "greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad"}, - {file = "greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef"}, - {file = "greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3"}, - {file = "greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95"}, - {file = 
"greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb"}, - {file = "greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b"}, - {file = "greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0"}, - {file = "greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36"}, - {file = "greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3"}, - {file = "greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86"}, - {file = "greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97"}, - {file = "greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728"}, - {file = "greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a"}, - {file = "greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892"}, - {file = "greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141"}, - {file = "greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a"}, - {file = "greenlet-3.2.3-cp39-cp39-macosx_11_0_universal2.whl", hash = 
"sha256:42efc522c0bd75ffa11a71e09cd8a399d83fafe36db250a87cf1dacfaa15dc64"}, - {file = "greenlet-3.2.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d760f9bdfe79bff803bad32b4d8ffb2c1d2ce906313fc10a83976ffb73d64ca7"}, - {file = "greenlet-3.2.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8324319cbd7b35b97990090808fdc99c27fe5338f87db50514959f8059999805"}, - {file = "greenlet-3.2.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:8c37ef5b3787567d322331d5250e44e42b58c8c713859b8a04c6065f27efbf72"}, - {file = "greenlet-3.2.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ce539fb52fb774d0802175d37fcff5c723e2c7d249c65916257f0a940cee8904"}, - {file = "greenlet-3.2.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:003c930e0e074db83559edc8705f3a2d066d4aa8c2f198aff1e454946efd0f26"}, - {file = "greenlet-3.2.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7e70ea4384b81ef9e84192e8a77fb87573138aa5d4feee541d8014e452b434da"}, - {file = "greenlet-3.2.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:22eb5ba839c4b2156f18f76768233fe44b23a31decd9cc0d4cc8141c211fd1b4"}, - {file = "greenlet-3.2.3-cp39-cp39-win32.whl", hash = "sha256:4532f0d25df67f896d137431b13f4cdce89f7e3d4a96387a41290910df4d3a57"}, - {file = "greenlet-3.2.3-cp39-cp39-win_amd64.whl", hash = "sha256:aaa7aae1e7f75eaa3ae400ad98f8644bb81e1dc6ba47ce8a93d3f17274e08322"}, - {file = "greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365"}, + {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, + {file = 
"greenlet-3.2.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f10fd42b5ee276335863712fa3da6608e93f70629c631bf77145021600abc23c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c8c9e331e58180d0d83c5b7999255721b725913ff6bc6cf39fa2a45841a4fd4b"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58b97143c9cc7b86fc458f215bd0932f1757ce649e05b640fea2e79b54cedb31"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"}, + {file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"}, + {file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079"}, + {file = 
"greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"}, + {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"}, + {file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"}, + {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = 
"sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"}, + {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, + {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"}, + {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"}, + {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:18d9260df2b5fbf41ae5139e1be4e796d99655f023a636cd0e11e6406cca7d58"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:671df96c1f23c4a0d4077a325483c1503c96a1b7d9db26592ae770daa41233d4"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:16458c245a38991aa19676900d48bd1a6f2ce3e16595051a4db9d012154e8433"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"}, + {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"}, + {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"}, + {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, ] [package.extras] docs = ["Sphinx", "furo"] -test = ["objgraph", "psutil"] +test = ["objgraph", "psutil", "setuptools"] [[package]] name = "h11" @@ -1317,14 +1343,14 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "identify" -version = "2.6.12" +version = "2.6.13" description = "File identification library for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2"}, - {file = "identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6"}, + {file = "identify-2.6.13-py2.py3-none-any.whl", hash = "sha256:60381139b3ae39447482ecc406944190f690d4a2997f2584062089848361b33b"}, + {file = "identify-2.6.13.tar.gz", hash = "sha256:da8d6c828e773620e13bfa86ea601c5a5310ba4bcd65edf378198b56a1f9fb32"}, ] [package.extras] @@ -1704,14 +1730,14 @@ test = ["click", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "p [[package]] name = "jupyter-lsp" -version = "2.2.6" +version = "2.3.0" description = "Multi-Language Server WebSocket proxy for Jupyter Notebook/Lab server" optional = false python-versions = ">=3.8" groups = ["jupyter"] files = [ - {file = "jupyter_lsp-2.2.6-py3-none-any.whl", hash = "sha256:283783752bf0b459ee7fa88effa72104d87dd343b82d5c06cf113ef755b15b6d"}, - {file = "jupyter_lsp-2.2.6.tar.gz", hash = "sha256:0566bd9bb04fd9e6774a937ed01522b555ba78be37bebef787c8ab22de4c0361"}, + {file = 
"jupyter_lsp-2.3.0-py3-none-any.whl", hash = "sha256:e914a3cb2addf48b1c7710914771aaf1819d46b2e5a79b0f917b5478ec93f34f"}, + {file = "jupyter_lsp-2.3.0.tar.gz", hash = "sha256:458aa59339dc868fb784d73364f17dbce8836e906cd75fd471a325cba02e0245"}, ] [package.dependencies] @@ -1719,14 +1745,14 @@ jupyter_server = ">=1.1.2" [[package]] name = "jupyter-server" -version = "2.16.0" +version = "2.17.0" description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." optional = false python-versions = ">=3.9" groups = ["jupyter"] files = [ - {file = "jupyter_server-2.16.0-py3-none-any.whl", hash = "sha256:3d8db5be3bc64403b1c65b400a1d7f4647a5ce743f3b20dbdefe8ddb7b55af9e"}, - {file = "jupyter_server-2.16.0.tar.gz", hash = "sha256:65d4b44fdf2dcbbdfe0aa1ace4a842d4aaf746a2b7b168134d5aaed35621b7f6"}, + {file = "jupyter_server-2.17.0-py3-none-any.whl", hash = "sha256:e8cb9c7db4251f51ed307e329b81b72ccf2056ff82d50524debde1ee1870e13f"}, + {file = "jupyter_server-2.17.0.tar.gz", hash = "sha256:c38ea898566964c888b4772ae1ed58eca84592e88251d2cfc4d171f81f7e99d5"}, ] [package.dependencies] @@ -1739,7 +1765,6 @@ jupyter-events = ">=0.11.0" jupyter-server-terminals = ">=0.4.4" nbconvert = ">=6.4.4" nbformat = ">=5.3.0" -overrides = ">=5.0" packaging = ">=22.0" prometheus-client = ">=0.9" pywinpty = {version = ">=2.0.1", markers = "os_name == \"nt\""} @@ -1860,92 +1885,125 @@ files = [ [[package]] name = "kiwisolver" -version = "1.4.8" +version = "1.4.9" description = "A fast implementation of the Cassowary constraint solver" optional = false python-versions = ">=3.10" groups = ["jupyter"] files = [ - {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88c6f252f6816a73b1f8c904f7bbe02fd67c09a69f7cb8a0eecdbf5ce78e63db"}, - {file = "kiwisolver-1.4.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c72941acb7b67138f35b879bbe85be0f6c6a70cab78fe3ef6db9c024d9223e5b"}, - {file = "kiwisolver-1.4.8-cp310-cp310-macosx_11_0_arm64.whl", 
hash = "sha256:ce2cf1e5688edcb727fdf7cd1bbd0b6416758996826a8be1d958f91880d0809d"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c8bf637892dc6e6aad2bc6d4d69d08764166e5e3f69d469e55427b6ac001b19d"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:034d2c891f76bd3edbdb3ea11140d8510dca675443da7304205a2eaa45d8334c"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d47b28d1dfe0793d5e96bce90835e17edf9a499b53969b03c6c47ea5985844c3"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb158fe28ca0c29f2260cca8c43005329ad58452c36f0edf298204de32a9a3ed"}, - {file = "kiwisolver-1.4.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5536185fce131780ebd809f8e623bf4030ce1b161353166c49a3c74c287897f"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:369b75d40abedc1da2c1f4de13f3482cb99e3237b38726710f4a793432b1c5ff"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:641f2ddf9358c80faa22e22eb4c9f54bd3f0e442e038728f500e3b978d00aa7d"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d561d2d8883e0819445cfe58d7ddd673e4015c3c57261d7bdcd3710d0d14005c"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:1732e065704b47c9afca7ffa272f845300a4eb959276bf6970dc07265e73b605"}, - {file = "kiwisolver-1.4.8-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bcb1ebc3547619c3b58a39e2448af089ea2ef44b37988caf432447374941574e"}, - {file = "kiwisolver-1.4.8-cp310-cp310-win_amd64.whl", hash = "sha256:89c107041f7b27844179ea9c85d6da275aa55ecf28413e87624d033cf1f6b751"}, - {file = "kiwisolver-1.4.8-cp310-cp310-win_arm64.whl", hash = "sha256:b5773efa2be9eb9fcf5415ea3ab70fc785d598729fd6057bea38d539ead28271"}, - {file = 
"kiwisolver-1.4.8-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a4d3601908c560bdf880f07d94f31d734afd1bb71e96585cace0e38ef44c6d84"}, - {file = "kiwisolver-1.4.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:856b269c4d28a5c0d5e6c1955ec36ebfd1651ac00e1ce0afa3e28da95293b561"}, - {file = "kiwisolver-1.4.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c2b9a96e0f326205af81a15718a9073328df1173a2619a68553decb7097fd5d7"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c5020c83e8553f770cb3b5fc13faac40f17e0b205bd237aebd21d53d733adb03"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dace81d28c787956bfbfbbfd72fdcef014f37d9b48830829e488fdb32b49d954"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11e1022b524bd48ae56c9b4f9296bce77e15a2e42a502cceba602f804b32bb79"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b9b4d2892fefc886f30301cdd80debd8bb01ecdf165a449eb6e78f79f0fabd6"}, - {file = "kiwisolver-1.4.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a96c0e790ee875d65e340ab383700e2b4891677b7fcd30a699146f9384a2bb0"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:23454ff084b07ac54ca8be535f4174170c1094a4cff78fbae4f73a4bcc0d4dab"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:87b287251ad6488e95b4f0b4a79a6d04d3ea35fde6340eb38fbd1ca9cd35bbbc"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:b21dbe165081142b1232a240fc6383fd32cdd877ca6cc89eab93e5f5883e1c25"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:768cade2c2df13db52475bd28d3a3fac8c9eff04b0e9e2fda0f3760f20b3f7fc"}, - {file = "kiwisolver-1.4.8-cp311-cp311-musllinux_1_2_x86_64.whl", 
hash = "sha256:d47cfb2650f0e103d4bf68b0b5804c68da97272c84bb12850d877a95c056bd67"}, - {file = "kiwisolver-1.4.8-cp311-cp311-win_amd64.whl", hash = "sha256:ed33ca2002a779a2e20eeb06aea7721b6e47f2d4b8a8ece979d8ba9e2a167e34"}, - {file = "kiwisolver-1.4.8-cp311-cp311-win_arm64.whl", hash = "sha256:16523b40aab60426ffdebe33ac374457cf62863e330a90a0383639ce14bf44b2"}, - {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502"}, - {file = "kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31"}, - {file = "kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a"}, - {file = "kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d"}, - {file = "kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8"}, - {file = "kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50"}, - {file = "kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476"}, - {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1c8ceb754339793c24aee1c9fb2485b5b1f5bb1c2c214ff13368431e51fc9a09"}, - {file = "kiwisolver-1.4.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a62808ac74b5e55a04a408cda6156f986cefbcf0ada13572696b507cc92fa1"}, - {file = "kiwisolver-1.4.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:68269e60ee4929893aad82666821aaacbd455284124817af45c11e50a4b42e3c"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34d142fba9c464bc3bbfeff15c96eab0e7310343d6aefb62a79d51421fcc5f1b"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc373e0eef45b59197de815b1b28ef89ae3955e7722cc9710fb91cd77b7f47"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:77e6f57a20b9bd4e1e2cedda4d0b986ebd0216236f0106e55c28aea3d3d69b16"}, - {file = "kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08e77738ed7538f036cd1170cbed942ef749137b1311fa2bbe2a7fda2f6bf3cc"}, - {file = 
"kiwisolver-1.4.8-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5ce1e481a74b44dd5e92ff03ea0cb371ae7a0268318e202be06c8f04f4f1246"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:fc2ace710ba7c1dfd1a3b42530b62b9ceed115f19a1656adefce7b1782a37794"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3452046c37c7692bd52b0e752b87954ef86ee2224e624ef7ce6cb21e8c41cc1b"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7e9a60b50fe8b2ec6f448fe8d81b07e40141bfced7f896309df271a0b92f80f3"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:918139571133f366e8362fa4a297aeba86c7816b7ecf0bc79168080e2bd79957"}, - {file = "kiwisolver-1.4.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e063ef9f89885a1d68dd8b2e18f5ead48653176d10a0e324e3b0030e3a69adeb"}, - {file = "kiwisolver-1.4.8-cp313-cp313-win_amd64.whl", hash = "sha256:a17b7c4f5b2c51bb68ed379defd608a03954a1845dfed7cc0117f1cc8a9b7fd2"}, - {file = "kiwisolver-1.4.8-cp313-cp313-win_arm64.whl", hash = "sha256:3cd3bc628b25f74aedc6d374d5babf0166a92ff1317f46267f12d2ed54bc1d30"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:370fd2df41660ed4e26b8c9d6bbcad668fbe2560462cba151a721d49e5b6628c"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:84a2f830d42707de1d191b9490ac186bf7997a9495d4e9072210a1296345f7dc"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7a3ad337add5148cf51ce0b55642dc551c0b9d6248458a757f98796ca7348712"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7506488470f41169b86d8c9aeff587293f530a23a23a49d6bc64dab66bedc71e"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2f0121b07b356a22fb0414cec4666bbe36fd6d0d759db3d37228f496ed67c880"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d6d6bd87df62c27d4185de7c511c6248040afae67028a8a22012b010bc7ad062"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:291331973c64bb9cce50bbe871fb2e675c4331dab4f31abe89f175ad7679a4d7"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:893f5525bb92d3d735878ec00f781b2de998333659507d29ea4466208df37bed"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b47a465040146981dc9db8647981b8cb96366fbc8d452b031e4f8fdffec3f26d"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:99cea8b9dd34ff80c521aef46a1dddb0dcc0283cf18bde6d756f1e6f31772165"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:151dffc4865e5fe6dafce5480fab84f950d14566c480c08a53c663a0020504b6"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:577facaa411c10421314598b50413aa1ebcf5126f704f1e5d72d7e4e9f020d90"}, - {file = "kiwisolver-1.4.8-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:be4816dc51c8a471749d664161b434912eee82f2ea66bd7628bd14583a833e85"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e7a019419b7b510f0f7c9dceff8c5eae2392037eae483a7f9162625233802b0a"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:286b18e86682fd2217a48fc6be6b0f20c1d0ed10958d8dc53453ad58d7be0bf8"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4191ee8dfd0be1c3666ccbac178c5a05d5f8d689bbe3fc92f3c4abec817f8fe0"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:7cd2785b9391f2873ad46088ed7599a6a71e762e1ea33e87514b1a441ed1da1c"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c07b29089b7ba090b6f1a669f1411f27221c3662b3a1b7010e67b59bb5a6f10b"}, - {file = "kiwisolver-1.4.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:65ea09a5a3faadd59c2ce96dc7bf0f364986a315949dc6374f04396b0d60e09b"}, - {file = "kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b4b4d74bda2b8ebf4da5bd42af11d02d04428b2c32846e4c2c93219df8a7987b"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fb3b8132019ea572f4611d770991000d7f58127560c4889729248eb5852a102f"}, + {file = "kiwisolver-1.4.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84fd60810829c27ae375114cd379da1fa65e6918e1da405f356a775d49a62bcf"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b78efa4c6e804ecdf727e580dbb9cba85624d2e1c6b5cb059c66290063bd99a9"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4efec7bcf21671db6a3294ff301d2fc861c31faa3c8740d1a94689234d1b415"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:90f47e70293fc3688b71271100a1a5453aa9944a81d27ff779c108372cf5567b"}, + {file = "kiwisolver-1.4.9-cp310-cp310-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fdca1def57a2e88ef339de1737a1449d6dbf5fab184c54a1fca01d541317154"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9cf554f21be770f5111a1690d42313e140355e687e05cf82cb23d0a721a64a48"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:fc1795ac5cd0510207482c3d1d3ed781143383b8cfd36f5c645f3897ce066220"}, + {file = 
"kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ccd09f20ccdbbd341b21a67ab50a119b64a403b09288c27481575105283c1586"}, + {file = "kiwisolver-1.4.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:540c7c72324d864406a009d72f5d6856f49693db95d1fbb46cf86febef873634"}, + {file = "kiwisolver-1.4.9-cp310-cp310-win_amd64.whl", hash = "sha256:ede8c6d533bc6601a47ad4046080d36b8fc99f81e6f1c17b0ac3c2dc91ac7611"}, + {file = "kiwisolver-1.4.9-cp310-cp310-win_arm64.whl", hash = "sha256:7b4da0d01ac866a57dd61ac258c5607b4cd677f63abaec7b148354d2b2cdd536"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:eb14a5da6dc7642b0f3a18f13654847cd8b7a2550e2645a5bda677862b03ba16"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39a219e1c81ae3b103643d2aedb90f1ef22650deb266ff12a19e7773f3e5f089"}, + {file = "kiwisolver-1.4.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2405a7d98604b87f3fc28b1716783534b1b4b8510d8142adca34ee0bc3c87543"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:dc1ae486f9abcef254b5618dfb4113dd49f94c68e3e027d03cf0143f3f772b61"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8a1f570ce4d62d718dce3f179ee78dac3b545ac16c0c04bb363b7607a949c0d1"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:cb27e7b78d716c591e88e0a09a2139c6577865d7f2e152488c2cc6257f460872"}, + {file = "kiwisolver-1.4.9-cp311-cp311-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:15163165efc2f627eb9687ea5f3a28137217d217ac4024893d753f46bce9de26"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:bdee92c56a71d2b24c33a7d4c2856bd6419d017e08caa7802d2963870e315028"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:412f287c55a6f54b0650bd9b6dce5aceddb95864a1a90c87af16979d37c89771"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2c93f00dcba2eea70af2be5f11a830a742fe6b579a1d4e00f47760ef13be247a"}, + {file = "kiwisolver-1.4.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f117e1a089d9411663a3207ba874f31be9ac8eaa5b533787024dc07aeb74f464"}, + {file = "kiwisolver-1.4.9-cp311-cp311-win_amd64.whl", hash = "sha256:be6a04e6c79819c9a8c2373317d19a96048e5a3f90bec587787e86a1153883c2"}, + {file = "kiwisolver-1.4.9-cp311-cp311-win_arm64.whl", hash = "sha256:0ae37737256ba2de764ddc12aed4956460277f00c4996d51a197e72f62f5eec7"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2"}, + {file = "kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77"}, + {file = "kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d"}, + {file = 
"kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2"}, + {file = "kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145"}, + {file = "kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54"}, + {file = "kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = "sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5d0432ccf1c7ab14f9949eec60c5d1f924f17c037e9f8b33352fa05799359b8"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efb3a45b35622bb6c16dbfab491a8f5a391fe0e9d45ef32f4df85658232ca0e2"}, + {file = "kiwisolver-1.4.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1a12cf6398e8a0a001a059747a1cbf24705e18fe413bc22de7b3d15c67cffe3f"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b67e6efbf68e077dd71d1a6b37e43e1a99d0bff1a3d51867d45ee8908b931098"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5656aa670507437af0207645273ccdfee4f14bacd7f7c67a4306d0dcaeaf6eed"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:bfc08add558155345129c7803b3671cf195e6a56e7a12f3dde7c57d9b417f525"}, + {file = "kiwisolver-1.4.9-cp313-cp313-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:40092754720b174e6ccf9e845d0d8c7d8e12c3d71e7fc35f55f3813e96376f78"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:497d05f29a1300d14e02e6441cf0f5ee81c1ff5a304b0d9fb77423974684e08b"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:bdd1a81a1860476eb41ac4bc1e07b3f07259e6d55bbf739b79c8aaedcf512799"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:e6b93f13371d341afee3be9f7c5964e3fe61d5fa30f6a30eb49856935dfe4fc3"}, + {file = "kiwisolver-1.4.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d75aa530ccfaa593da12834b86a0724f58bff12706659baa9227c2ccaa06264c"}, + {file = "kiwisolver-1.4.9-cp313-cp313-win_amd64.whl", hash = "sha256:dd0a578400839256df88c16abddf9ba14813ec5f21362e1fe65022e00c883d4d"}, + {file = "kiwisolver-1.4.9-cp313-cp313-win_arm64.whl", hash = "sha256:d4188e73af84ca82468f09cadc5ac4db578109e52acb4518d8154698d3a87ca2"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:5a0f2724dfd4e3b3ac5a82436a8e6fd16baa7d507117e4279b660fe8ca38a3a1"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:1b11d6a633e4ed84fc0ddafd4ebfd8ea49b3f25082c04ad12b8315c11d504dc1"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61874cdb0a36016354853593cffc38e56fc9ca5aa97d2c05d3dcf6922cd55a11"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:60c439763a969a6af93b4881db0eed8fadf93ee98e18cbc35bc8da868d0c4f0c"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92a2f997387a1b79a75e7803aa7ded2cfbe2823852ccf1ba3bcf613b62ae3197"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a31d512c812daea6d8b3be3b2bfcbeb091dbb09177706569bcfc6240dcf8b41c"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:52a15b0f35dad39862d376df10c5230155243a2c1a436e39eb55623ccbd68185"}, + {file = 
"kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a30fd6fdef1430fd9e1ba7b3398b5ee4e2887783917a687d86ba69985fb08748"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:cc9617b46837c6468197b5945e196ee9ca43057bb7d9d1ae688101e4e1dddf64"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:0ab74e19f6a2b027ea4f845a78827969af45ce790e6cb3e1ebab71bdf9f215ff"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dba5ee5d3981160c28d5490f0d1b7ed730c22470ff7f6cc26cfcfaacb9896a07"}, + {file = "kiwisolver-1.4.9-cp313-cp313t-win_arm64.whl", hash = "sha256:0749fd8f4218ad2e851e11cc4dc05c7cbc0cbc4267bdfdb31782e65aace4ee9c"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:9928fe1eb816d11ae170885a74d074f57af3a0d65777ca47e9aeb854a1fba386"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d0005b053977e7b43388ddec89fa567f43d4f6d5c2c0affe57de5ebf290dc552"}, + {file = "kiwisolver-1.4.9-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:2635d352d67458b66fd0667c14cb1d4145e9560d503219034a18a87e971ce4f3"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:767c23ad1c58c9e827b649a9ab7809fd5fd9db266a9cf02b0e926ddc2c680d58"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:72d0eb9fba308b8311685c2268cf7d0a0639a6cd027d8128659f72bdd8a024b4"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f68e4f3eeca8fb22cc3d731f9715a13b652795ef657a13df1ad0c7dc0e9731df"}, + {file = "kiwisolver-1.4.9-cp314-cp314-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d84cd4061ae292d8ac367b2c3fa3aad11cb8625a95d135fe93f286f914f3f5a6"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:a60ea74330b91bd22a29638940d115df9dc00af5035a9a2a6ad9399ffb4ceca5"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ce6a3a4e106cf35c2d9c4fa17c05ce0b180db622736845d4315519397a77beaf"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:77937e5e2a38a7b48eef0585114fe7930346993a88060d0bf886086d2aa49ef5"}, + {file = "kiwisolver-1.4.9-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:24c175051354f4a28c5d6a31c93906dc653e2bf234e8a4bbfb964892078898ce"}, + {file = "kiwisolver-1.4.9-cp314-cp314-win_amd64.whl", hash = "sha256:0763515d4df10edf6d06a3c19734e2566368980d21ebec439f33f9eb936c07b7"}, + {file = "kiwisolver-1.4.9-cp314-cp314-win_arm64.whl", hash = "sha256:0e4e2bf29574a6a7b7f6cb5fa69293b9f96c928949ac4a53ba3f525dffb87f9c"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:d976bbb382b202f71c67f77b0ac11244021cfa3f7dfd9e562eefcea2df711548"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2489e4e5d7ef9a1c300a5e0196e43d9c739f066ef23270607d45aba368b91f2d"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:e2ea9f7ab7fbf18fffb1b5434ce7c69a07582f7acc7717720f1d69f3e806f90c"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b34e51affded8faee0dfdb705416153819d8ea9250bbbf7ea1b249bdeb5f1122"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d8aacd3d4b33b772542b2e01beb50187536967b514b00003bdda7589722d2a64"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7cf974dd4e35fa315563ac99d6287a1024e4dc2077b8a7d7cd3d2fb65d283134"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:85bd218b5ecfbee8c8a82e121802dcb519a86044c9c3b2e4aef02fa05c6da370"}, + {file = 
"kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0856e241c2d3df4efef7c04a1e46b1936b6120c9bcf36dd216e3acd84bc4fb21"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9af39d6551f97d31a4deebeac6f45b156f9755ddc59c07b402c148f5dbb6482a"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:bb4ae2b57fc1d8cbd1cf7b1d9913803681ffa903e7488012be5b76dedf49297f"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:aedff62918805fb62d43a4aa2ecd4482c380dc76cd31bd7c8878588a61bd0369"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-win_amd64.whl", hash = "sha256:1fa333e8b2ce4d9660f2cda9c0e1b6bafcfb2457a9d259faa82289e73ec24891"}, + {file = "kiwisolver-1.4.9-cp314-cp314t-win_arm64.whl", hash = "sha256:4a48a2ce79d65d363597ef7b567ce3d14d68783d2b2263d98db3d9477805ba32"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4d1d9e582ad4d63062d34077a9a1e9f3c34088a2ec5135b1f7190c07cf366527"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:deed0c7258ceb4c44ad5ec7d9918f9f14fd05b2be86378d86cf50e63d1e7b771"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0a590506f303f512dff6b7f75fd2fd18e16943efee932008fe7140e5fa91d80e"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e09c2279a4d01f099f52d5c4b3d9e208e91edcbd1a175c9662a8b16e000fece9"}, + {file = "kiwisolver-1.4.9-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c9e7cdf45d594ee04d5be1b24dd9d49f3d1590959b2271fb30b5ca2b262c00fb"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:720e05574713db64c356e86732c0f3c5252818d05f9df320f0ad8380641acea5"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:17680d737d5335b552994a2008fab4c851bcd7de33094a82067ef3a576ff02fa"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:85b5352f94e490c028926ea567fc569c52ec79ce131dadb968d3853e809518c2"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:464415881e4801295659462c49461a24fb107c140de781d55518c4b80cb6790f"}, + {file = "kiwisolver-1.4.9-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:fb940820c63a9590d31d88b815e7a3aa5915cad3ce735ab45f0c730b39547de1"}, + {file = "kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d"}, +] + +[[package]] +name = "korean-lunar-calendar" +version = "0.3.1" +description = "Korean Lunar Calendar" +optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "korean_lunar_calendar-0.3.1-py3-none-any.whl", hash = "sha256:392757135c492c4f42a604e6038042953c35c6f449dda5f27e3f86a7f9c943e5"}, + {file = "korean_lunar_calendar-0.3.1.tar.gz", hash = "sha256:eb2c485124a061016926bdea6d89efdf9b9fdbf16db55895b6cf1e5bec17b857"}, ] [[package]] @@ -2209,14 +2267,14 @@ files = [ [[package]] name = "narwhals" -version = "2.0.1" +version = "2.2.0" description = "Extremely lightweight compatibility layer between dataframe libraries" optional = false python-versions = ">=3.9" groups = ["main", "jupyter"] files = [ - {file = "narwhals-2.0.1-py3-none-any.whl", hash = "sha256:837457e36a2ba1710c881fb69e1f79ce44fb81728c92ac378f70892a53af8ddb"}, - {file = "narwhals-2.0.1.tar.gz", hash = "sha256:235e61ca807bc21110ca36a4d53888ecc22c42dcdf50a7c886e10dde3fd7f38c"}, + {file = "narwhals-2.2.0-py3-none-any.whl", hash = "sha256:2b5e3d61a486fa4328c286b0c8018b3e781a964947ff725d66ba12f6d5ca3d2a"}, + {file = "narwhals-2.2.0.tar.gz", hash = "sha256:f6a34f2699acabe2c17339c104f0bec28b9f7a55fbc7f8d485d49bea72d12b8a"}, ] [package.extras] @@ -2464,18 +2522,6 @@ files = [ {file = 
"numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48"}, ] -[[package]] -name = "overrides" -version = "7.7.0" -description = "A decorator to automatically detect mismatch when overriding a method." -optional = false -python-versions = ">=3.6" -groups = ["jupyter"] -files = [ - {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, - {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, -] - [[package]] name = "packaging" version = "25.0" @@ -2490,54 +2536,54 @@ files = [ [[package]] name = "pandas" -version = "2.3.1" +version = "2.3.2" description = "Powerful data structures for data analysis, time series, and statistics" optional = false python-versions = ">=3.9" groups = ["main", "jupyter"] files = [ - {file = "pandas-2.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:22c2e866f7209ebc3a8f08d75766566aae02bcc91d196935a1d9e59c7b990ac9"}, - {file = "pandas-2.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3583d348546201aff730c8c47e49bc159833f971c2899d6097bce68b9112a4f1"}, - {file = "pandas-2.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f951fbb702dacd390561e0ea45cdd8ecfa7fb56935eb3dd78e306c19104b9b0"}, - {file = "pandas-2.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd05b72ec02ebfb993569b4931b2e16fbb4d6ad6ce80224a3ee838387d83a191"}, - {file = "pandas-2.3.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1b916a627919a247d865aed068eb65eb91a344b13f5b57ab9f610b7716c92de1"}, - {file = "pandas-2.3.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fe67dc676818c186d5a3d5425250e40f179c2a89145df477dd82945eaea89e97"}, - {file = "pandas-2.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:2eb789ae0274672acbd3c575b0598d213345660120a257b47b5dafdc618aec83"}, - {file = 
"pandas-2.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2b0540963d83431f5ce8870ea02a7430adca100cec8a050f0811f8e31035541b"}, - {file = "pandas-2.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fe7317f578c6a153912bd2292f02e40c1d8f253e93c599e82620c7f69755c74f"}, - {file = "pandas-2.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6723a27ad7b244c0c79d8e7007092d7c8f0f11305770e2f4cd778b3ad5f9f85"}, - {file = "pandas-2.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3462c3735fe19f2638f2c3a40bd94ec2dc5ba13abbb032dd2fa1f540a075509d"}, - {file = "pandas-2.3.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:98bcc8b5bf7afed22cc753a28bc4d9e26e078e777066bc53fac7904ddef9a678"}, - {file = "pandas-2.3.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4d544806b485ddf29e52d75b1f559142514e60ef58a832f74fb38e48d757b299"}, - {file = "pandas-2.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:b3cd4273d3cb3707b6fffd217204c52ed92859533e31dc03b7c5008aa933aaab"}, - {file = "pandas-2.3.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:689968e841136f9e542020698ee1c4fbe9caa2ed2213ae2388dc7b81721510d3"}, - {file = "pandas-2.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:025e92411c16cbe5bb2a4abc99732a6b132f439b8aab23a59fa593eb00704232"}, - {file = "pandas-2.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b7ff55f31c4fcb3e316e8f7fa194566b286d6ac430afec0d461163312c5841e"}, - {file = "pandas-2.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dcb79bf373a47d2a40cf7232928eb7540155abbc460925c2c96d2d30b006eb4"}, - {file = "pandas-2.3.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:56a342b231e8862c96bdb6ab97170e203ce511f4d0429589c8ede1ee8ece48b8"}, - {file = "pandas-2.3.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ca7ed14832bce68baef331f4d7f294411bed8efd032f8109d690df45e00c4679"}, - {file = 
"pandas-2.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:ac942bfd0aca577bef61f2bc8da8147c4ef6879965ef883d8e8d5d2dc3e744b8"}, - {file = "pandas-2.3.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9026bd4a80108fac2239294a15ef9003c4ee191a0f64b90f170b40cfb7cf2d22"}, - {file = "pandas-2.3.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6de8547d4fdb12421e2d047a2c446c623ff4c11f47fddb6b9169eb98ffba485a"}, - {file = "pandas-2.3.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:782647ddc63c83133b2506912cc6b108140a38a37292102aaa19c81c83db2928"}, - {file = "pandas-2.3.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ba6aff74075311fc88504b1db890187a3cd0f887a5b10f5525f8e2ef55bfdb9"}, - {file = "pandas-2.3.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e5635178b387bd2ba4ac040f82bc2ef6e6b500483975c4ebacd34bec945fda12"}, - {file = "pandas-2.3.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6f3bf5ec947526106399a9e1d26d40ee2b259c66422efdf4de63c848492d91bb"}, - {file = "pandas-2.3.1-cp313-cp313-win_amd64.whl", hash = "sha256:1c78cf43c8fde236342a1cb2c34bcff89564a7bfed7e474ed2fffa6aed03a956"}, - {file = "pandas-2.3.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:8dfc17328e8da77be3cf9f47509e5637ba8f137148ed0e9b5241e1baf526e20a"}, - {file = "pandas-2.3.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ec6c851509364c59a5344458ab935e6451b31b818be467eb24b0fe89bd05b6b9"}, - {file = "pandas-2.3.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:911580460fc4884d9b05254b38a6bfadddfcc6aaef856fb5859e7ca202e45275"}, - {file = "pandas-2.3.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f4d6feeba91744872a600e6edbbd5b033005b431d5ae8379abee5bcfa479fab"}, - {file = "pandas-2.3.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:fe37e757f462d31a9cd7580236a82f353f5713a80e059a29753cf938c6775d96"}, - {file = 
"pandas-2.3.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5db9637dbc24b631ff3707269ae4559bce4b7fd75c1c4d7e13f40edc42df4444"}, - {file = "pandas-2.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4645f770f98d656f11c69e81aeb21c6fca076a44bed3dcbb9396a4311bc7f6d8"}, - {file = "pandas-2.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:342e59589cc454aaff7484d75b816a433350b3d7964d7847327edda4d532a2e3"}, - {file = "pandas-2.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d12f618d80379fde6af007f65f0c25bd3e40251dbd1636480dfffce2cf1e6da"}, - {file = "pandas-2.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd71c47a911da120d72ef173aeac0bf5241423f9bfea57320110a978457e069e"}, - {file = "pandas-2.3.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:09e3b1587f0f3b0913e21e8b32c3119174551deb4a4eba4a89bc7377947977e7"}, - {file = "pandas-2.3.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2323294c73ed50f612f67e2bf3ae45aea04dce5690778e08a09391897f35ff88"}, - {file = "pandas-2.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:b4b0de34dc8499c2db34000ef8baad684cfa4cbd836ecee05f323ebfba348c7d"}, - {file = "pandas-2.3.1.tar.gz", hash = "sha256:0a95b9ac964fe83ce317827f80304d37388ea77616b1425f0ae41c9d2d0d7bb2"}, + {file = "pandas-2.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52bc29a946304c360561974c6542d1dd628ddafa69134a7131fdfd6a5d7a1a35"}, + {file = "pandas-2.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:220cc5c35ffaa764dd5bb17cf42df283b5cb7fdf49e10a7b053a06c9cb48ee2b"}, + {file = "pandas-2.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c05e15111221384019897df20c6fe893b2f697d03c811ee67ec9e0bb5a3424"}, + {file = "pandas-2.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc03acc273c5515ab69f898df99d9d4f12c4d70dbfc24c3acc6203751d0804cf"}, + {file = "pandas-2.3.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:d25c20a03e8870f6339bcf67281b946bd20b86f1a544ebbebb87e66a8d642cba"}, + {file = "pandas-2.3.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21bb612d148bb5860b7eb2c10faacf1a810799245afd342cf297d7551513fbb6"}, + {file = "pandas-2.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:b62d586eb25cb8cb70a5746a378fc3194cb7f11ea77170d59f889f5dfe3cec7a"}, + {file = "pandas-2.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1333e9c299adcbb68ee89a9bb568fc3f20f9cbb419f1dd5225071e6cddb2a743"}, + {file = "pandas-2.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:76972bcbd7de8e91ad5f0ca884a9f2c477a2125354af624e022c49e5bd0dfff4"}, + {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b98bdd7c456a05eef7cd21fd6b29e3ca243591fe531c62be94a2cc987efb5ac2"}, + {file = "pandas-2.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d81573b3f7db40d020983f78721e9bfc425f411e616ef019a10ebf597aedb2e"}, + {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e190b738675a73b581736cc8ec71ae113d6c3768d0bd18bffa5b9a0927b0b6ea"}, + {file = "pandas-2.3.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c253828cb08f47488d60f43c5fc95114c771bbfff085da54bfc79cb4f9e3a372"}, + {file = "pandas-2.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:9467697b8083f9667b212633ad6aa4ab32436dcbaf4cd57325debb0ddef2012f"}, + {file = "pandas-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fbb977f802156e7a3f829e9d1d5398f6192375a3e2d1a9ee0803e35fe70a2b9"}, + {file = "pandas-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1b9b52693123dd234b7c985c68b709b0b009f4521000d0525f2b95c22f15944b"}, + {file = "pandas-2.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bd281310d4f412733f319a5bc552f86d62cddc5f51d2e392c8787335c994175"}, + {file = "pandas-2.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:96d31a6b4354e3b9b8a2c848af75d31da390657e3ac6f30c05c82068b9ed79b9"}, + {file = "pandas-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:df4df0b9d02bb873a106971bb85d448378ef14b86ba96f035f50bbd3688456b4"}, + {file = "pandas-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:213a5adf93d020b74327cb2c1b842884dbdd37f895f42dcc2f09d451d949f811"}, + {file = "pandas-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c13b81a9347eb8c7548f53fd9a4f08d4dfe996836543f805c987bafa03317ae"}, + {file = "pandas-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:0c6ecbac99a354a051ef21c5307601093cb9e0f4b1855984a084bfec9302699e"}, + {file = "pandas-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c6f048aa0fd080d6a06cc7e7537c09b53be6642d330ac6f54a600c3ace857ee9"}, + {file = "pandas-2.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0064187b80a5be6f2f9c9d6bdde29372468751dfa89f4211a3c5871854cfbf7a"}, + {file = "pandas-2.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ac8c320bded4718b298281339c1a50fb00a6ba78cb2a63521c39bec95b0209b"}, + {file = "pandas-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:114c2fe4f4328cf98ce5716d1532f3ab79c5919f95a9cfee81d9140064a2e4d6"}, + {file = "pandas-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:48fa91c4dfb3b2b9bfdb5c24cd3567575f4e13f9636810462ffed8925352be5a"}, + {file = "pandas-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:12d039facec710f7ba305786837d0225a3444af7bbd9c15c32ca2d40d157ed8b"}, + {file = "pandas-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c624b615ce97864eb588779ed4046186f967374185c047070545253a52ab2d57"}, + {file = "pandas-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0cee69d583b9b128823d9514171cabb6861e09409af805b54459bd0c821a35c2"}, + {file = "pandas-2.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2319656ed81124982900b4c37f0e0c58c015af9a7bbc62342ba5ad07ace82ba9"}, + {file = "pandas-2.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b37205ad6f00d52f16b6d09f406434ba928c1a1966e2771006a9033c736d30d2"}, + {file = "pandas-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:837248b4fc3a9b83b9c6214699a13f069dc13510a6a6d7f9ba33145d2841a012"}, + {file = "pandas-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d2c3554bd31b731cd6490d94a28f3abb8dd770634a9e06eb6d2911b9827db370"}, + {file = "pandas-2.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:88080a0ff8a55eac9c84e3ff3c7665b3b5476c6fbc484775ca1910ce1c3e0b87"}, + {file = "pandas-2.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d4a558c7620340a0931828d8065688b3cc5b4c8eb674bcaf33d18ff4a6870b4a"}, + {file = "pandas-2.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45178cf09d1858a1509dc73ec261bf5b25a625a389b65be2e47b559905f0ab6a"}, + {file = "pandas-2.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77cefe00e1b210f9c76c697fedd8fdb8d3dd86563e9c8adc9fa72b90f5e9e4c2"}, + {file = "pandas-2.3.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:13bd629c653856f00c53dc495191baa59bcafbbf54860a46ecc50d3a88421a96"}, + {file = "pandas-2.3.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:36d627906fd44b5fd63c943264e11e96e923f8de77d6016dc2f667b9ad193438"}, + {file = "pandas-2.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a9d7ec92d71a420185dec44909c32e9a362248c4ae2238234b76d5be37f208cc"}, + {file = "pandas-2.3.2.tar.gz", hash = "sha256:ab7b58f8f82706890924ccdfb5f48002b83d2b5a3845976a9fb705d36c34dcdb"}, ] [package.dependencies] @@ -2585,35 +2631,20 @@ files = [ [[package]] name = "parso" -version = "0.8.4" +version = "0.8.5" description = "A Python Parser" optional = false python-versions = ">=3.6" groups = ["jupyter"] files = [ - {file = "parso-0.8.4-py2.py3-none-any.whl", hash = 
"sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, - {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, + {file = "parso-0.8.5-py2.py3-none-any.whl", hash = "sha256:646204b5ee239c396d040b90f9e272e9a8017c630092bf59980beb62fd033887"}, + {file = "parso-0.8.5.tar.gz", hash = "sha256:034d7354a9a018bdce352f48b2a8a450f05e9d6ee85db84764e9b6bd96dafe5a"}, ] [package.extras] qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["docopt", "pytest"] -[[package]] -name = "pbr" -version = "7.0.0" -description = "Python Build Reasonableness" -optional = false -python-versions = ">=2.6" -groups = ["security"] -files = [ - {file = "pbr-7.0.0-py2.py3-none-any.whl", hash = "sha256:b447e63a2bc04fd975fc0480b8d5ebf979179e2c0ae203bf1eff9ea20073bc38"}, - {file = "pbr-7.0.0.tar.gz", hash = "sha256:cf4127298723dafbce3afd13775ccf3885be5d3c8435751b867f9a6a10b71a39"}, -] - -[package.dependencies] -setuptools = "*" - [[package]] name = "peewee" version = "3.18.2" @@ -2768,14 +2799,14 @@ xmp = ["defusedxml"] [[package]] name = "platformdirs" -version = "4.3.8" +version = "4.4.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.9" groups = ["main", "dev", "jupyter"] files = [ - {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, - {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, + {file = "platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85"}, + {file = "platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf"}, ] [package.extras] @@ -2859,14 +2890,14 @@ twisted = ["twisted"] [[package]] name = "prompt-toolkit" -version = "3.0.51" +version = "3.0.52" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.8" groups = ["jupyter"] files = [ - {file = "prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07"}, - {file = "prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed"}, + {file = "prompt_toolkit-3.0.52-py3-none-any.whl", hash = "sha256:9aac639a3bbd33284347de5ad8d68ecc044b91a762dc39b7c21095fcd6a19955"}, + {file = "prompt_toolkit-3.0.52.tar.gz", hash = "sha256:28cde192929c8e7321de85de1ddbe736f1375148b02f2e17edd840042b1be855"}, ] [package.dependencies] @@ -2874,46 +2905,52 @@ wcwidth = "*" [[package]] name = "protobuf" -version = "6.31.1" +version = "6.32.0" description = "" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"}, - {file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"}, - {file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = 
"sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"}, - {file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"}, - {file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"}, - {file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = "sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"}, - {file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"}, - {file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"}, - {file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"}, + {file = "protobuf-6.32.0-cp310-abi3-win32.whl", hash = "sha256:84f9e3c1ff6fb0308dbacb0950d8aa90694b0d0ee68e75719cb044b7078fe741"}, + {file = "protobuf-6.32.0-cp310-abi3-win_amd64.whl", hash = "sha256:a8bdbb2f009cfc22a36d031f22a625a38b615b5e19e558a7b756b3279723e68e"}, + {file = "protobuf-6.32.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d52691e5bee6c860fff9a1c86ad26a13afbeb4b168cd4445c922b7e2cf85aaf0"}, + {file = "protobuf-6.32.0-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:501fe6372fd1c8ea2a30b4d9be8f87955a64d6be9c88a973996cef5ef6f0abf1"}, + {file = "protobuf-6.32.0-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:75a2aab2bd1aeb1f5dc7c5f33bcb11d82ea8c055c9becbb41c26a8c43fd7092c"}, + {file = "protobuf-6.32.0-cp39-cp39-win32.whl", hash = "sha256:7db8ed09024f115ac877a1427557b838705359f047b2ff2f2b2364892d19dacb"}, + {file = "protobuf-6.32.0-cp39-cp39-win_amd64.whl", hash = "sha256:15eba1b86f193a407607112ceb9ea0ba9569aed24f93333fe9a497cf2fda37d3"}, + {file = "protobuf-6.32.0-py3-none-any.whl", hash = "sha256:ba377e5b67b908c8f3072a57b63e2c6a4cbd18aea4ed98d2584350dbf46f2783"}, + 
{file = "protobuf-6.32.0.tar.gz", hash = "sha256:a81439049127067fc49ec1d36e25c6ee1d1a2b7be930675f919258d03c04e7d2"}, ] [[package]] name = "psutil" -version = "7.0.0" -description = "Cross-platform lib for process and system monitoring in Python. NOTE: the syntax of this script MUST be kept compatible with Python 2.7." +version = "6.0.0" +description = "Cross-platform lib for process and system monitoring in Python." optional = false -python-versions = ">=3.6" -groups = ["jupyter"] +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +groups = ["jupyter", "security"] files = [ - {file = "psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25"}, - {file = "psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da"}, - {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91"}, - {file = "psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34"}, - {file = "psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993"}, - {file = "psutil-7.0.0-cp36-cp36m-win32.whl", hash = "sha256:84df4eb63e16849689f76b1ffcb36db7b8de703d1bc1fe41773db487621b6c17"}, - {file = "psutil-7.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:1e744154a6580bc968a0195fd25e80432d3afec619daf145b9e5ba16cc1d688e"}, - {file = "psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99"}, - {file = "psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553"}, - {file 
= "psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456"}, + {file = "psutil-6.0.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0"}, + {file = "psutil-6.0.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3"}, + {file = "psutil-6.0.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c"}, + {file = "psutil-6.0.0-cp27-none-win32.whl", hash = "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35"}, + {file = "psutil-6.0.0-cp27-none-win_amd64.whl", hash = "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1"}, + {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, + {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, + {file = "psutil-6.0.0-cp36-cp36m-win32.whl", hash = "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14"}, + {file = "psutil-6.0.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c"}, + {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, + {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, + {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, + {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, ] [package.extras] -dev = ["abi3audit", "black (==24.10.0)", "check-manifest", "coverage", "packaging", "pylint", "pyperf", "pypinfo", "pytest", "pytest-cov", "pytest-xdist", "requests", "rstcheck", "ruff", "setuptools", "sphinx", "sphinx_rtd_theme", "toml-sort", "twine", "virtualenv", "vulture", "wheel"] -test = ["pytest", "pytest-xdist", "setuptools"] +test = ["enum34 ; python_version <= \"3.4\"", "ipaddress ; python_version < \"3.0\"", "mock ; python_version < \"3.0\"", "pywin32 ; sys_platform == \"win32\"", "wmi ; sys_platform == \"win32\""] [[package]] name = "psycopg2-binary" @@ -3006,6 +3043,22 @@ files = [ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] +[[package]] +name = "pulp" +version = "3.2.2" +description = "PuLP is an LP modeler written in python. PuLP can generate MPS or LP files and call GLPK, COIN CLP/CBC, CPLEX, and GUROBI to solve linear problems." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pulp-3.2.2-py3-none-any.whl", hash = "sha256:d3ca5ff11a28b3e7b2508a992d7e51f3533471d89305f0560b5fe3b6cc821043"}, + {file = "pulp-3.2.2.tar.gz", hash = "sha256:389a6ff1dc34ec4b093f34f7a9fa3553743ff0ea99b2a423e9f0dd16940f63d2"}, +] + +[package.extras] +open-py = ["cylp ; sys_platform != \"win32\"", "highspy", "pyscipopt"] +public-py = ["coptpy", "gurobipy", "xpress"] + [[package]] name = "pure-eval" version = "0.2.3" @@ -3183,6 +3236,22 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pyluach" +version = "2.2.0" +description = "A Python package for dealing with Hebrew (Jewish) calendar dates." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "pyluach-2.2.0-py3-none-any.whl", hash = "sha256:d1eb49d6292087e9290f4661ae01b60c8c933704ec8c9cef82673b349ff96adf"}, + {file = "pyluach-2.2.0.tar.gz", hash = "sha256:9063a25387cd7624276fd0656508bada08aa8a6f22e8db352844cd858e69012b"}, +] + +[package.extras] +doc = ["sphinx (>=6.1.3,<6.2.0)", "sphinx_rtd_theme (>=1.2.0,<1.3.0)"] +test = ["beautifulsoup4", "flake8", "pytest", "pytest-cov"] + [[package]] name = "pyparsing" version = "3.2.3" @@ -3396,104 +3465,104 @@ files = [ [[package]] name = "pyzmq" -version = "27.0.1" +version = "27.0.2" description = "Python bindings for 0MQ" optional = false python-versions = ">=3.8" groups = ["jupyter"] files = [ - {file = "pyzmq-27.0.1-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:90a4da42aa322de8a3522461e3b5fe999935763b27f69a02fced40f4e3cf9682"}, - {file = "pyzmq-27.0.1-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:e648dca28178fc879c814cf285048dd22fd1f03e1104101106505ec0eea50a4d"}, - {file = "pyzmq-27.0.1-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bca8abc31799a6f3652d13f47e0b0e1cab76f9125f2283d085a3754f669b607"}, - {file = 
"pyzmq-27.0.1-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:092f4011b26d6b0201002f439bd74b38f23f3aefcb358621bdc3b230afc9b2d5"}, - {file = "pyzmq-27.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6f02f30a4a6b3efe665ab13a3dd47109d80326c8fd286311d1ba9f397dc5f247"}, - {file = "pyzmq-27.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f293a1419266e3bf3557d1f8778f9e1ffe7e6b2c8df5c9dca191caf60831eb74"}, - {file = "pyzmq-27.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ce181dd1a7c6c012d0efa8ab603c34b5ee9d86e570c03415bbb1b8772eeb381c"}, - {file = "pyzmq-27.0.1-cp310-cp310-win32.whl", hash = "sha256:f65741cc06630652e82aa68ddef4986a3ab9073dd46d59f94ce5f005fa72037c"}, - {file = "pyzmq-27.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:44909aa3ed2234d69fe81e1dade7be336bcfeab106e16bdaa3318dcde4262b93"}, - {file = "pyzmq-27.0.1-cp310-cp310-win_arm64.whl", hash = "sha256:4401649bfa0a38f0f8777f8faba7cd7eb7b5b8ae2abc7542b830dd09ad4aed0d"}, - {file = "pyzmq-27.0.1-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:9729190bd770314f5fbba42476abf6abe79a746eeda11d1d68fd56dd70e5c296"}, - {file = "pyzmq-27.0.1-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:696900ef6bc20bef6a242973943574f96c3f97d2183c1bd3da5eea4f559631b1"}, - {file = "pyzmq-27.0.1-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f96a63aecec22d3f7fdea3c6c98df9e42973f5856bb6812c3d8d78c262fee808"}, - {file = "pyzmq-27.0.1-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c512824360ea7490390566ce00bee880e19b526b312b25cc0bc30a0fe95cb67f"}, - {file = "pyzmq-27.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:dfb2bb5e0f7198eaacfb6796fb0330afd28f36d985a770745fba554a5903595a"}, - {file = "pyzmq-27.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4f6886c59ba93ffde09b957d3e857e7950c8fe818bd5494d9b4287bc6d5bc7f1"}, - {file = 
"pyzmq-27.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b99ea9d330e86ce1ff7f2456b33f1bf81c43862a5590faf4ef4ed3a63504bdab"}, - {file = "pyzmq-27.0.1-cp311-cp311-win32.whl", hash = "sha256:571f762aed89025ba8cdcbe355fea56889715ec06d0264fd8b6a3f3fa38154ed"}, - {file = "pyzmq-27.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:ee16906c8025fa464bea1e48128c048d02359fb40bebe5333103228528506530"}, - {file = "pyzmq-27.0.1-cp311-cp311-win_arm64.whl", hash = "sha256:ba068f28028849da725ff9185c24f832ccf9207a40f9b28ac46ab7c04994bd41"}, - {file = "pyzmq-27.0.1-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:af7ebce2a1e7caf30c0bb64a845f63a69e76a2fadbc1cac47178f7bb6e657bdd"}, - {file = "pyzmq-27.0.1-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:8f617f60a8b609a13099b313e7e525e67f84ef4524b6acad396d9ff153f6e4cd"}, - {file = "pyzmq-27.0.1-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1d59dad4173dc2a111f03e59315c7bd6e73da1a9d20a84a25cf08325b0582b1a"}, - {file = "pyzmq-27.0.1-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f5b6133c8d313bde8bd0d123c169d22525300ff164c2189f849de495e1344577"}, - {file = "pyzmq-27.0.1-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:58cca552567423f04d06a075f4b473e78ab5bdb906febe56bf4797633f54aa4e"}, - {file = "pyzmq-27.0.1-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:4b9d8e26fb600d0d69cc9933e20af08552e97cc868a183d38a5c0d661e40dfbb"}, - {file = "pyzmq-27.0.1-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2329f0c87f0466dce45bba32b63f47018dda5ca40a0085cc5c8558fea7d9fc55"}, - {file = "pyzmq-27.0.1-cp312-abi3-win32.whl", hash = "sha256:57bb92abdb48467b89c2d21da1ab01a07d0745e536d62afd2e30d5acbd0092eb"}, - {file = "pyzmq-27.0.1-cp312-abi3-win_amd64.whl", hash = "sha256:ff3f8757570e45da7a5bedaa140489846510014f7a9d5ee9301c61f3f1b8a686"}, - {file = "pyzmq-27.0.1-cp312-abi3-win_arm64.whl", hash = 
"sha256:df2c55c958d3766bdb3e9d858b911288acec09a9aab15883f384fc7180df5bed"}, - {file = "pyzmq-27.0.1-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:497bd8af534ae55dc4ef67eebd1c149ff2a0b0f1e146db73c8b5a53d83c1a5f5"}, - {file = "pyzmq-27.0.1-cp313-cp313-android_24_x86_64.whl", hash = "sha256:a066ea6ad6218b4c233906adf0ae67830f451ed238419c0db609310dd781fbe7"}, - {file = "pyzmq-27.0.1-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:72d235d6365ca73d8ce92f7425065d70f5c1e19baa458eb3f0d570e425b73a96"}, - {file = "pyzmq-27.0.1-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:313a7b374e3dc64848644ca348a51004b41726f768b02e17e689f1322366a4d9"}, - {file = "pyzmq-27.0.1-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:119ce8590409702394f959c159d048002cbed2f3c0645ec9d6a88087fc70f0f1"}, - {file = "pyzmq-27.0.1-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:45c3e00ce16896ace2cd770ab9057a7cf97d4613ea5f2a13f815141d8b6894b9"}, - {file = "pyzmq-27.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:678e50ec112bdc6df5a83ac259a55a4ba97a8b314c325ab26b3b5b071151bc61"}, - {file = "pyzmq-27.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:d0b96c30be9f9387b18b18b6133c75a7b1b0065da64e150fe1feb5ebf31ece1c"}, - {file = "pyzmq-27.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88dc92d9eb5ea4968123e74db146d770b0c8d48f0e2bfb1dbc6c50a8edb12d64"}, - {file = "pyzmq-27.0.1-cp313-cp313t-win32.whl", hash = "sha256:6dcbcb34f5c9b0cefdfc71ff745459241b7d3cda5b27c7ad69d45afc0821d1e1"}, - {file = "pyzmq-27.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:b9fd0fda730461f510cfd9a40fafa5355d65f5e3dbdd8d6dfa342b5b3f5d1949"}, - {file = "pyzmq-27.0.1-cp313-cp313t-win_arm64.whl", hash = "sha256:56a3b1853f3954ec1f0e91085f1350cc57d18f11205e4ab6e83e4b7c414120e0"}, - {file = "pyzmq-27.0.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = 
"sha256:f98f6b7787bd2beb1f0dde03f23a0621a0c978edf673b7d8f5e7bc039cbe1b60"}, - {file = "pyzmq-27.0.1-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:351bf5d8ca0788ca85327fda45843b6927593ff4c807faee368cc5aaf9f809c2"}, - {file = "pyzmq-27.0.1-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5268a5a9177afff53dc6d70dffe63114ba2a6e7b20d9411cc3adeba09eeda403"}, - {file = "pyzmq-27.0.1-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a4aca06ba295aa78bec9b33ec028d1ca08744c36294338c41432b7171060c808"}, - {file = "pyzmq-27.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1c363c6dc66352331d5ad64bb838765c6692766334a6a02fdb05e76bd408ae18"}, - {file = "pyzmq-27.0.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:87aebf4acd7249bdff8d3df03aed4f09e67078e6762cfe0aecf8d0748ff94cde"}, - {file = "pyzmq-27.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e4f22d67756518d71901edf73b38dc0eb4765cce22c8fe122cc81748d425262b"}, - {file = "pyzmq-27.0.1-cp314-cp314t-win32.whl", hash = "sha256:8c62297bc7aea2147b472ca5ca2b4389377ad82898c87cabab2a94aedd75e337"}, - {file = "pyzmq-27.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:bee5248d5ec9223545f8cc4f368c2d571477ae828c99409125c3911511d98245"}, - {file = "pyzmq-27.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:0fc24bf45e4a454e55ef99d7f5c8b8712539200ce98533af25a5bfa954b6b390"}, - {file = "pyzmq-27.0.1-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:9d16fdfd7d70a6b0ca45d36eb19f7702fa77ef6256652f17594fc9ce534c9da6"}, - {file = "pyzmq-27.0.1-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:d0356a21e58c3e99248930ff73cc05b1d302ff50f41a8a47371aefb04327378a"}, - {file = "pyzmq-27.0.1-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a27fa11ebaccc099cac4309c799aa33919671a7660e29b3e465b7893bc64ec81"}, - {file = "pyzmq-27.0.1-cp38-cp38-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:b25e72e115399a4441aad322258fa8267b873850dc7c276e3f874042728c2b45"}, - {file = "pyzmq-27.0.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:f8c3b74f1cd577a5a9253eae7ed363f88cbb345a990ca3027e9038301d47c7f4"}, - {file = "pyzmq-27.0.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:19dce6c93656f9c469540350d29b128cd8ba55b80b332b431b9a1e9ff74cfd01"}, - {file = "pyzmq-27.0.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:da81512b83032ed6cdf85ca62e020b4c23dda87f1b6c26b932131222ccfdbd27"}, - {file = "pyzmq-27.0.1-cp38-cp38-win32.whl", hash = "sha256:7418fb5736d0d39b3ecc6bec4ff549777988feb260f5381636d8bd321b653038"}, - {file = "pyzmq-27.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:af2ee67b3688b067e20fea3fe36b823a362609a1966e7e7a21883ae6da248804"}, - {file = "pyzmq-27.0.1-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:05a94233fdde585eb70924a6e4929202a747eea6ed308a6171c4f1c715bbe39e"}, - {file = "pyzmq-27.0.1-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:c96702e1082eab62ae583d64c4e19c9b848359196697e536a0c57ae9bd165bd5"}, - {file = "pyzmq-27.0.1-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c9180d1f5b4b73e28b64e63cc6c4c097690f102aa14935a62d5dd7426a4e5b5a"}, - {file = "pyzmq-27.0.1-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e971d8680003d0af6020713e52f92109b46fedb463916e988814e04c8133578a"}, - {file = "pyzmq-27.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fe632fa4501154d58dfbe1764a0495734d55f84eaf1feda4549a1f1ca76659e9"}, - {file = "pyzmq-27.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:4c3874344fd5fa6d58bb51919708048ac4cab21099f40a227173cddb76b4c20b"}, - {file = "pyzmq-27.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:0ec09073ed67ae236785d543df3b322282acc0bdf6d1b748c3e81f3043b21cb5"}, - {file = "pyzmq-27.0.1-cp39-cp39-win32.whl", hash = "sha256:f44e7ea288d022d4bf93b9e79dafcb4a7aea45a3cbeae2116792904931cefccf"}, - {file = 
"pyzmq-27.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ffe6b809a97ac6dea524b3b837d5b28743d8c2f121141056d168ff0ba8f614ef"}, - {file = "pyzmq-27.0.1-cp39-cp39-win_arm64.whl", hash = "sha256:fde26267416c8478c95432c81489b53f57b0b5d24cd5c8bfaebf5bbaac4dc90c"}, - {file = "pyzmq-27.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:544b995a6a1976fad5d7ff01409b4588f7608ccc41be72147700af91fd44875d"}, - {file = "pyzmq-27.0.1-pp310-pypy310_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:0f772eea55cccce7f45d6ecdd1d5049c12a77ec22404f6b892fae687faa87bee"}, - {file = "pyzmq-27.0.1-pp310-pypy310_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c9d63d66059114a6756d09169c9209ffceabacb65b9cb0f66e6fc344b20b73e6"}, - {file = "pyzmq-27.0.1-pp310-pypy310_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1da8e645c655d86f0305fb4c65a0d848f461cd90ee07d21f254667287b5dbe50"}, - {file = "pyzmq-27.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1843fd0daebcf843fe6d4da53b8bdd3fc906ad3e97d25f51c3fed44436d82a49"}, - {file = "pyzmq-27.0.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7fb0ee35845bef1e8c4a152d766242164e138c239e3182f558ae15cb4a891f94"}, - {file = "pyzmq-27.0.1-pp311-pypy311_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f379f11e138dfd56c3f24a04164f871a08281194dd9ddf656a278d7d080c8ad0"}, - {file = "pyzmq-27.0.1-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b978c0678cffbe8860ec9edc91200e895c29ae1ac8a7085f947f8e8864c489fb"}, - {file = "pyzmq-27.0.1-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ebccf0d760bc92a4a7c751aeb2fef6626144aace76ee8f5a63abeb100cae87f"}, - {file = "pyzmq-27.0.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:77fed80e30fa65708546c4119840a46691290efc231f6bfb2ac2a39b52e15811"}, - {file = "pyzmq-27.0.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = 
"sha256:9d7b6b90da7285642f480b48c9efd1d25302fd628237d8f6f6ee39ba6b2d2d34"}, - {file = "pyzmq-27.0.1-pp38-pypy38_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:d2976b7079f09f48d59dc123293ed6282fca6ef96a270f4ea0364e4e54c8e855"}, - {file = "pyzmq-27.0.1-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2852f67371918705cc18b321695f75c5d653d5d8c4a9b946c1eec4dab2bd6fdf"}, - {file = "pyzmq-27.0.1-pp38-pypy38_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:be45a895f98877271e8a0b6cf40925e0369121ce423421c20fa6d7958dc753c2"}, - {file = "pyzmq-27.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:64ca3c7c614aefcdd5e358ecdd41d1237c35fe1417d01ec0160e7cdb0a380edc"}, - {file = "pyzmq-27.0.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d97b59cbd8a6c8b23524a8ce237ff9504d987dc07156258aa68ae06d2dd5f34d"}, - {file = "pyzmq-27.0.1-pp39-pypy39_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:27a78bdd384dbbe7b357af95f72efe8c494306b5ec0a03c31e2d53d6763e5307"}, - {file = "pyzmq-27.0.1-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b007e5dcba684e888fbc90554cb12a2f4e492927c8c2761a80b7590209821743"}, - {file = "pyzmq-27.0.1-pp39-pypy39_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:95594b2ceeaa94934e3e94dd7bf5f3c3659cf1a26b1fb3edcf6e42dad7e0eaf2"}, - {file = "pyzmq-27.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:70b719a130b81dd130a57ac0ff636dc2c0127c5b35ca5467d1b67057e3c7a4d2"}, - {file = "pyzmq-27.0.1.tar.gz", hash = "sha256:45c549204bc20e7484ffd2555f6cf02e572440ecf2f3bdd60d4404b20fddf64b"}, + {file = "pyzmq-27.0.2-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:8b32c4636ced87dce0ac3d671e578b3400215efab372f1b4be242e8cf0b11384"}, + {file = "pyzmq-27.0.2-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f9528a4b3e24189cb333a9850fddbbafaa81df187297cfbddee50447cdb042cf"}, + {file = 
"pyzmq-27.0.2-cp310-cp310-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3b02ba0c0b2b9ebe74688002e6c56c903429924a25630804b9ede1f178aa5a3f"}, + {file = "pyzmq-27.0.2-cp310-cp310-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e4dc5c9a6167617251dea0d024d67559795761aabb4b7ea015518be898be076"}, + {file = "pyzmq-27.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f1151b33aaf3b4fa9da26f4d696e38eebab67d1b43c446184d733c700b3ff8ce"}, + {file = "pyzmq-27.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:4ecfc7999ac44c9ef92b5ae8f0b44fb935297977df54d8756b195a3cd12f38f0"}, + {file = "pyzmq-27.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:31c26a5d0b00befcaeeb600d8b15ad09f5604b6f44e2057ec5e521a9e18dcd9a"}, + {file = "pyzmq-27.0.2-cp310-cp310-win32.whl", hash = "sha256:25a100d2de2ac0c644ecf4ce0b509a720d12e559c77aff7e7e73aa684f0375bc"}, + {file = "pyzmq-27.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a1acf091f53bb406e9e5e7383e467d1dd1b94488b8415b890917d30111a1fef3"}, + {file = "pyzmq-27.0.2-cp310-cp310-win_arm64.whl", hash = "sha256:b38e01f11e9e95f6668dc8a62dccf9483f454fed78a77447507a0e8dcbd19a63"}, + {file = "pyzmq-27.0.2-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:063845960df76599ad4fad69fa4d884b3ba38304272104fdcd7e3af33faeeb1d"}, + {file = "pyzmq-27.0.2-cp311-cp311-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:845a35fb21b88786aeb38af8b271d41ab0967985410f35411a27eebdc578a076"}, + {file = "pyzmq-27.0.2-cp311-cp311-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:515d20b5c3c86db95503faa989853a8ab692aab1e5336db011cd6d35626c4cb1"}, + {file = "pyzmq-27.0.2-cp311-cp311-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:862aedec0b0684a5050cdb5ec13c2da96d2f8dffda48657ed35e312a4e31553b"}, + {file = "pyzmq-27.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2cb5bcfc51c7a4fce335d3bc974fd1d6a916abbcdd2b25f6e89d37b8def25f57"}, + {file = 
"pyzmq-27.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:38ff75b2a36e3a032e9fef29a5871e3e1301a37464e09ba364e3c3193f62982a"}, + {file = "pyzmq-27.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7a5709abe8d23ca158a9d0a18c037f4193f5b6afeb53be37173a41e9fb885792"}, + {file = "pyzmq-27.0.2-cp311-cp311-win32.whl", hash = "sha256:47c5dda2018c35d87be9b83de0890cb92ac0791fd59498847fc4eca6ff56671d"}, + {file = "pyzmq-27.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:f54ca3e98f8f4d23e989c7d0edcf9da7a514ff261edaf64d1d8653dd5feb0a8b"}, + {file = "pyzmq-27.0.2-cp311-cp311-win_arm64.whl", hash = "sha256:2ef3067cb5b51b090fb853f423ad7ed63836ec154374282780a62eb866bf5768"}, + {file = "pyzmq-27.0.2-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:5da05e3c22c95e23bfc4afeee6ff7d4be9ff2233ad6cb171a0e8257cd46b169a"}, + {file = "pyzmq-27.0.2-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:4e4520577971d01d47e2559bb3175fce1be9103b18621bf0b241abe0a933d040"}, + {file = "pyzmq-27.0.2-cp312-abi3-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:56d7de7bf73165b90bd25a8668659ccb134dd28449116bf3c7e9bab5cf8a8ec9"}, + {file = "pyzmq-27.0.2-cp312-abi3-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:340e7cddc32f147c6c00d116a3f284ab07ee63dbd26c52be13b590520434533c"}, + {file = "pyzmq-27.0.2-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba95693f9df8bb4a9826464fb0fe89033936f35fd4a8ff1edff09a473570afa0"}, + {file = "pyzmq-27.0.2-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:ca42a6ce2d697537da34f77a1960d21476c6a4af3e539eddb2b114c3cf65a78c"}, + {file = "pyzmq-27.0.2-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3e44e665d78a07214b2772ccbd4b9bcc6d848d7895f1b2d7653f047b6318a4f6"}, + {file = "pyzmq-27.0.2-cp312-abi3-win32.whl", hash = "sha256:272d772d116615397d2be2b1417b3b8c8bc8671f93728c2f2c25002a4530e8f6"}, + {file = "pyzmq-27.0.2-cp312-abi3-win_amd64.whl", hash = 
"sha256:734be4f44efba0aa69bf5f015ed13eb69ff29bf0d17ea1e21588b095a3147b8e"}, + {file = "pyzmq-27.0.2-cp312-abi3-win_arm64.whl", hash = "sha256:41f0bd56d9279392810950feb2785a419c2920bbf007fdaaa7f4a07332ae492d"}, + {file = "pyzmq-27.0.2-cp313-cp313-android_24_arm64_v8a.whl", hash = "sha256:7f01118133427cd7f34ee133b5098e2af5f70303fa7519785c007bca5aa6f96a"}, + {file = "pyzmq-27.0.2-cp313-cp313-android_24_x86_64.whl", hash = "sha256:e4b860edf6379a7234ccbb19b4ed2c57e3ff569c3414fadfb49ae72b61a8ef07"}, + {file = "pyzmq-27.0.2-cp313-cp313t-macosx_10_15_universal2.whl", hash = "sha256:cb77923ea163156da14295c941930bd525df0d29c96c1ec2fe3c3806b1e17cb3"}, + {file = "pyzmq-27.0.2-cp313-cp313t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:61678b7407b04df8f9423f188156355dc94d0fb52d360ae79d02ed7e0d431eea"}, + {file = "pyzmq-27.0.2-cp313-cp313t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e3c824b70925963bdc8e39a642672c15ffaa67e7d4b491f64662dd56d6271263"}, + {file = "pyzmq-27.0.2-cp313-cp313t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c4833e02fcf2751975457be1dfa2f744d4d09901a8cc106acaa519d868232175"}, + {file = "pyzmq-27.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b18045668d09cf0faa44918af2a67f0dbbef738c96f61c2f1b975b1ddb92ccfc"}, + {file = "pyzmq-27.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:bbbb7e2f3ac5a22901324e7b086f398b8e16d343879a77b15ca3312e8cd8e6d5"}, + {file = "pyzmq-27.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:b751914a73604d40d88a061bab042a11d4511b3ddbb7624cd83c39c8a498564c"}, + {file = "pyzmq-27.0.2-cp313-cp313t-win32.whl", hash = "sha256:3e8f833dd82af11db5321c414638045c70f61009f72dd61c88db4a713c1fb1d2"}, + {file = "pyzmq-27.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:5b45153cb8eadcab14139970643a84f7a7b08dda541fbc1f6f4855c49334b549"}, + {file = "pyzmq-27.0.2-cp313-cp313t-win_arm64.whl", hash = "sha256:86898f5c9730df23427c1ee0097d8aa41aa5f89539a79e48cd0d2c22d059f1b7"}, 
+ {file = "pyzmq-27.0.2-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:d2b4b261dce10762be5c116b6ad1f267a9429765b493c454f049f33791dd8b8a"}, + {file = "pyzmq-27.0.2-cp314-cp314t-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:4e4d88b6cff156fed468903006b24bbd85322612f9c2f7b96e72d5016fd3f543"}, + {file = "pyzmq-27.0.2-cp314-cp314t-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8426c0ebbc11ed8416a6e9409c194142d677c2c5c688595f2743664e356d9e9b"}, + {file = "pyzmq-27.0.2-cp314-cp314t-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:565bee96a155fe6452caed5fb5f60c9862038e6b51a59f4f632562081cdb4004"}, + {file = "pyzmq-27.0.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5de735c745ca5cefe9c2d1547d8f28cfe1b1926aecb7483ab1102fd0a746c093"}, + {file = "pyzmq-27.0.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ea4f498f8115fd90d7bf03a3e83ae3e9898e43362f8e8e8faec93597206e15cc"}, + {file = "pyzmq-27.0.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d00e81cb0afd672915257a3927124ee2ad117ace3c256d39cd97ca3f190152ad"}, + {file = "pyzmq-27.0.2-cp314-cp314t-win32.whl", hash = "sha256:0f6e9b00d81b58f859fffc112365d50413954e02aefe36c5b4c8fb4af79f8cc3"}, + {file = "pyzmq-27.0.2-cp314-cp314t-win_amd64.whl", hash = "sha256:2e73cf3b127a437fef4100eb3ac2ebe6b49e655bb721329f667f59eca0a26221"}, + {file = "pyzmq-27.0.2-cp314-cp314t-win_arm64.whl", hash = "sha256:4108785f2e5ac865d06f678a07a1901e3465611356df21a545eeea8b45f56265"}, + {file = "pyzmq-27.0.2-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:59a50f5eedf8ed20b7dbd57f1c29b2de003940dea3eedfbf0fbfea05ee7f9f61"}, + {file = "pyzmq-27.0.2-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:a00e6390e52770ba1ec753b2610f90b4f00e74c71cfc5405b917adf3cc39565e"}, + {file = "pyzmq-27.0.2-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:49d8d05d9844d83cddfbc86a82ac0cafe7ab694fcc9c9618de8d015c318347c3"}, + {file = 
"pyzmq-27.0.2-cp38-cp38-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3660d85e2b6a28eb2d586dedab9c61a7b7c64ab0d89a35d2973c7be336f12b0d"}, + {file = "pyzmq-27.0.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:bccfee44b392f4d13bbf05aa88d8f7709271b940a8c398d4216fde6b717624ae"}, + {file = "pyzmq-27.0.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:989066d51686415f1da646d6e2c5364a9b084777c29d9d1720aa5baf192366ef"}, + {file = "pyzmq-27.0.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cc283595b82f0db155a52f6462945c7b6b47ecaae2f681746eeea537c95cf8c9"}, + {file = "pyzmq-27.0.2-cp38-cp38-win32.whl", hash = "sha256:ad38daf57495beadc0d929e8901b2aa46ff474239b5a8a46ccc7f67dc01d2335"}, + {file = "pyzmq-27.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:36508466a266cf78bba2f56529ad06eb38ba827f443b47388d420bec14d331ba"}, + {file = "pyzmq-27.0.2-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:aa9c1c208c263b84386ac25bed6af5672397dc3c232638114fc09bca5c7addf9"}, + {file = "pyzmq-27.0.2-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:795c4884cfe7ea59f2b67d82b417e899afab889d332bfda13b02f8e0c155b2e4"}, + {file = "pyzmq-27.0.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47eb65bb25478358ba3113dd9a08344f616f417ad3ffcbb190cd874fae72b1b1"}, + {file = "pyzmq-27.0.2-cp39-cp39-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6fc24f00293f10aff04d55ca37029b280474c91f4de2cad5e911e5e10d733b7"}, + {file = "pyzmq-27.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:58d4cc9b6b768478adfc40a5cbee545303db8dbc81ba688474e0f499cc581028"}, + {file = "pyzmq-27.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:cea2f26c5972796e02b222968a21a378d09eb4ff590eb3c5fafa8913f8c2bdf5"}, + {file = "pyzmq-27.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a0621ec020c49fc1b6e31304f1a820900d54e7d9afa03ea1634264bf9387519e"}, + {file = "pyzmq-27.0.2-cp39-cp39-win32.whl", hash = 
"sha256:1326500792a9cb0992db06bbaf5d0098459133868932b81a6e90d45c39eca99d"}, + {file = "pyzmq-27.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:5ee9560cb1e3094ef01fc071b361121a57ebb8d4232912b6607a6d7d2d0a97b4"}, + {file = "pyzmq-27.0.2-cp39-cp39-win_arm64.whl", hash = "sha256:85e3c6fb0d25ea046ebcfdc2bcb9683d663dc0280645c79a616ff5077962a15b"}, + {file = "pyzmq-27.0.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:d67a0960803a37b60f51b460c58444bc7033a804c662f5735172e21e74ee4902"}, + {file = "pyzmq-27.0.2-pp310-pypy310_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dd4d3e6a567ffd0d232cfc667c49d0852d0ee7481458a2a1593b9b1bc5acba88"}, + {file = "pyzmq-27.0.2-pp310-pypy310_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5e558be423631704803bc6a642e2caa96083df759e25fe6eb01f2d28725f80bd"}, + {file = "pyzmq-27.0.2-pp310-pypy310_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c4c20ba8389f495c7b4f6b896bb1ca1e109a157d4f189267a902079699aaf787"}, + {file = "pyzmq-27.0.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c5be232f7219414ff672ff7ab8c5a7e8632177735186d8a42b57b491fafdd64e"}, + {file = "pyzmq-27.0.2-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:e297784aea724294fe95e442e39a4376c2f08aa4fae4161c669f047051e31b02"}, + {file = "pyzmq-27.0.2-pp311-pypy311_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:e3659a79ded9745bc9c2aef5b444ac8805606e7bc50d2d2eb16dc3ab5483d91f"}, + {file = "pyzmq-27.0.2-pp311-pypy311_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3dba49ff037d02373a9306b58d6c1e0be031438f822044e8767afccfdac4c6b"}, + {file = "pyzmq-27.0.2-pp311-pypy311_pp73-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de84e1694f9507b29e7b263453a2255a73e3d099d258db0f14539bad258abe41"}, + {file = "pyzmq-27.0.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f0944d65ba2b872b9fcece08411d6347f15a874c775b4c3baae7f278550da0fb"}, + {file = 
"pyzmq-27.0.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:05288947797dcd6724702db2056972dceef9963a83041eb734aea504416094ec"}, + {file = "pyzmq-27.0.2-pp38-pypy38_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dff9198adbb6810ad857f3bfa59b4859c45acb02b0d198b39abeafb9148474f3"}, + {file = "pyzmq-27.0.2-pp38-pypy38_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:849123fd9982c7f63911fdceba9870f203f0f32c953a3bab48e7f27803a0e3ec"}, + {file = "pyzmq-27.0.2-pp38-pypy38_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c5ee06945f3069e3609819890a01958c4bbfea7a2b31ae87107c6478838d309e"}, + {file = "pyzmq-27.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6156ad5e8bbe8a78a3f5b5757c9a883b0012325c83f98ce6d58fcec81e8b3d06"}, + {file = "pyzmq-27.0.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:400f34321e3bd89b1165b91ea6b18ad26042ba9ad0dfed8b35049e2e24eeab9b"}, + {file = "pyzmq-27.0.2-pp39-pypy39_pp73-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9cbad4ef12e4c15c94d2c24ecd15a8ed56bf091c62f121a2b0c618ddd4b7402b"}, + {file = "pyzmq-27.0.2-pp39-pypy39_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6b2b74aac3392b8cf508ccb68c980a8555298cd378434a2d065d6ce0f4211dff"}, + {file = "pyzmq-27.0.2-pp39-pypy39_pp73-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7db5db88c24cf9253065d69229a148ff60821e5d6f8ff72579b1f80f8f348bab"}, + {file = "pyzmq-27.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8ffe40c216c41756ca05188c3e24a23142334b304f7aebd75c24210385e35573"}, + {file = "pyzmq-27.0.2.tar.gz", hash = "sha256:b398dd713b18de89730447347e96a0240225e154db56e35b6bb8447ffdb07798"}, ] [package.dependencies] @@ -3518,14 +3587,14 @@ typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""} [[package]] name = "requests" -version = "2.32.4" +version = "2.32.5" description = "Python HTTP for Humans." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main", "jupyter", "security"] files = [ - {file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"}, - {file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"}, + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, ] [package.dependencies] @@ -3604,167 +3673,167 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.27.0" +version = "0.27.1" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.9" groups = ["jupyter"] files = [ - {file = "rpds_py-0.27.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:130c1ffa5039a333f5926b09e346ab335f0d4ec393b030a18549a7c7e7c2cea4"}, - {file = "rpds_py-0.27.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a4cf32a26fa744101b67bfd28c55d992cd19438aff611a46cac7f066afca8fd4"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64a0fe3f334a40b989812de70160de6b0ec7e3c9e4a04c0bbc48d97c5d3600ae"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a0ff7ee28583ab30a52f371b40f54e7138c52ca67f8ca17ccb7ccf0b383cb5f"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:15ea4d2e182345dd1b4286593601d766411b43f868924afe297570658c31a62b"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36184b44bf60a480863e51021c26aca3dfe8dd2f5eeabb33622b132b9d8b8b54"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9b78430703cfcf5f5e86eb74027a1ed03a93509273d7c705babb547f03e60016"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:dbd749cff1defbde270ca346b69b3baf5f1297213ef322254bf2a28537f0b046"}, - {file = "rpds_py-0.27.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bde37765564cd22a676dd8101b657839a1854cfaa9c382c5abf6ff7accfd4ae"}, - {file = "rpds_py-0.27.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:1d66f45b9399036e890fb9c04e9f70c33857fd8f58ac8db9f3278cfa835440c3"}, - {file = "rpds_py-0.27.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d85d784c619370d9329bbd670f41ff5f2ae62ea4519761b679d0f57f0f0ee267"}, - {file = "rpds_py-0.27.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5df559e9e7644d9042f626f2c3997b555f347d7a855a15f170b253f6c5bfe358"}, - {file = "rpds_py-0.27.0-cp310-cp310-win32.whl", hash = "sha256:b8a4131698b6992b2a56015f51646711ec5d893a0b314a4b985477868e240c87"}, - {file = "rpds_py-0.27.0-cp310-cp310-win_amd64.whl", hash = "sha256:cbc619e84a5e3ab2d452de831c88bdcad824414e9c2d28cd101f94dbdf26329c"}, - {file = "rpds_py-0.27.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:dbc2ab5d10544eb485baa76c63c501303b716a5c405ff2469a1d8ceffaabf622"}, - {file = "rpds_py-0.27.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7ec85994f96a58cf7ed288caa344b7fe31fd1d503bdf13d7331ead5f70ab60d5"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:190d7285cd3bb6d31d37a0534d7359c1ee191eb194c511c301f32a4afa5a1dd4"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c10d92fb6d7fd827e44055fcd932ad93dac6a11e832d51534d77b97d1d85400f"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dd2c1d27ebfe6a015cfa2005b7fe8c52d5019f7bbdd801bc6f7499aab9ae739e"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:4790c9d5dd565ddb3e9f656092f57268951398cef52e364c405ed3112dc7c7c1"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4300e15e7d03660f04be84a125d1bdd0e6b2f674bc0723bc0fd0122f1a4585dc"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:59195dc244fc183209cf8a93406889cadde47dfd2f0a6b137783aa9c56d67c85"}, - {file = "rpds_py-0.27.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fae4a01ef8c4cb2bbe92ef2063149596907dc4a881a8d26743b3f6b304713171"}, - {file = "rpds_py-0.27.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e3dc8d4ede2dbae6c0fc2b6c958bf51ce9fd7e9b40c0f5b8835c3fde44f5807d"}, - {file = "rpds_py-0.27.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c3782fb753aa825b4ccabc04292e07897e2fd941448eabf666856c5530277626"}, - {file = "rpds_py-0.27.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:887ab1f12b0d227e9260558a4a2320024b20102207ada65c43e1ffc4546df72e"}, - {file = "rpds_py-0.27.0-cp311-cp311-win32.whl", hash = "sha256:5d6790ff400254137b81b8053b34417e2c46921e302d655181d55ea46df58cf7"}, - {file = "rpds_py-0.27.0-cp311-cp311-win_amd64.whl", hash = "sha256:e24d8031a2c62f34853756d9208eeafa6b940a1efcbfe36e8f57d99d52bb7261"}, - {file = "rpds_py-0.27.0-cp311-cp311-win_arm64.whl", hash = "sha256:08680820d23df1df0a0260f714d12966bc6c42d02e8055a91d61e03f0c47dda0"}, - {file = "rpds_py-0.27.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:19c990fdf5acecbf0623e906ae2e09ce1c58947197f9bced6bbd7482662231c4"}, - {file = "rpds_py-0.27.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c27a7054b5224710fcfb1a626ec3ff4f28bcb89b899148c72873b18210e446b"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09965b314091829b378b60607022048953e25f0b396c2b70e7c4c81bcecf932e"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:14f028eb47f59e9169bfdf9f7ceafd29dd64902141840633683d0bad5b04ff34"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6168af0be75bba990a39f9431cdfae5f0ad501f4af32ae62e8856307200517b8"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab47fe727c13c09d0e6f508e3a49e545008e23bf762a245b020391b621f5b726"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fa01b3d5e3b7d97efab65bd3d88f164e289ec323a8c033c5c38e53ee25c007e"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:6c135708e987f46053e0a1246a206f53717f9fadfba27174a9769ad4befba5c3"}, - {file = "rpds_py-0.27.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc327f4497b7087d06204235199daf208fd01c82d80465dc5efa4ec9df1c5b4e"}, - {file = "rpds_py-0.27.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7e57906e38583a2cba67046a09c2637e23297618dc1f3caddbc493f2be97c93f"}, - {file = "rpds_py-0.27.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f4f69d7a4300fbf91efb1fb4916421bd57804c01ab938ab50ac9c4aa2212f03"}, - {file = "rpds_py-0.27.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b4c4fbbcff474e1e5f38be1bf04511c03d492d42eec0babda5d03af3b5589374"}, - {file = "rpds_py-0.27.0-cp312-cp312-win32.whl", hash = "sha256:27bac29bbbf39601b2aab474daf99dbc8e7176ca3389237a23944b17f8913d97"}, - {file = "rpds_py-0.27.0-cp312-cp312-win_amd64.whl", hash = "sha256:8a06aa1197ec0281eb1d7daf6073e199eb832fe591ffa329b88bae28f25f5fe5"}, - {file = "rpds_py-0.27.0-cp312-cp312-win_arm64.whl", hash = "sha256:e14aab02258cb776a108107bd15f5b5e4a1bbaa61ef33b36693dfab6f89d54f9"}, - {file = "rpds_py-0.27.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:443d239d02d9ae55b74015234f2cd8eb09e59fbba30bf60baeb3123ad4c6d5ff"}, - {file = "rpds_py-0.27.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:b8a7acf04fda1f30f1007f3cc96d29d8cf0a53e626e4e1655fdf4eabc082d367"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d0f92b78cfc3b74a42239fdd8c1266f4715b573204c234d2f9fc3fc7a24f185"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce4ed8e0c7dbc5b19352b9c2c6131dd23b95fa8698b5cdd076307a33626b72dc"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fde355b02934cc6b07200cc3b27ab0c15870a757d1a72fd401aa92e2ea3c6bfe"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13bbc4846ae4c993f07c93feb21a24d8ec637573d567a924b1001e81c8ae80f9"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0744661afbc4099fef7f4e604e7f1ea1be1dd7284f357924af12a705cc7d5c"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:069e0384a54f427bd65d7fda83b68a90606a3835901aaff42185fcd94f5a9295"}, - {file = "rpds_py-0.27.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4bc262ace5a1a7dc3e2eac2fa97b8257ae795389f688b5adf22c5db1e2431c43"}, - {file = "rpds_py-0.27.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2fe6e18e5c8581f0361b35ae575043c7029d0a92cb3429e6e596c2cdde251432"}, - {file = "rpds_py-0.27.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d93ebdb82363d2e7bec64eecdc3632b59e84bd270d74fe5be1659f7787052f9b"}, - {file = "rpds_py-0.27.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0954e3a92e1d62e83a54ea7b3fdc9efa5d61acef8488a8a3d31fdafbfb00460d"}, - {file = "rpds_py-0.27.0-cp313-cp313-win32.whl", hash = "sha256:2cff9bdd6c7b906cc562a505c04a57d92e82d37200027e8d362518df427f96cd"}, - {file = "rpds_py-0.27.0-cp313-cp313-win_amd64.whl", hash = "sha256:dc79d192fb76fc0c84f2c58672c17bbbc383fd26c3cdc29daae16ce3d927e8b2"}, - {file = 
"rpds_py-0.27.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b3a5c8089eed498a3af23ce87a80805ff98f6ef8f7bdb70bd1b7dae5105f6ac"}, - {file = "rpds_py-0.27.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:90fb790138c1a89a2e58c9282fe1089638401f2f3b8dddd758499041bc6e0774"}, - {file = "rpds_py-0.27.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:010c4843a3b92b54373e3d2291a7447d6c3fc29f591772cc2ea0e9f5c1da434b"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9ce7a9e967afc0a2af7caa0d15a3e9c1054815f73d6a8cb9225b61921b419bd"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa0bf113d15e8abdfee92aa4db86761b709a09954083afcb5bf0f952d6065fdb"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb91d252b35004a84670dfeafadb042528b19842a0080d8b53e5ec1128e8f433"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db8a6313dbac934193fc17fe7610f70cd8181c542a91382531bef5ed785e5615"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce96ab0bdfcef1b8c371ada2100767ace6804ea35aacce0aef3aeb4f3f499ca8"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:7451ede3560086abe1aa27dcdcf55cd15c96b56f543fb12e5826eee6f721f858"}, - {file = "rpds_py-0.27.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:32196b5a99821476537b3f7732432d64d93a58d680a52c5e12a190ee0135d8b5"}, - {file = "rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a029be818059870664157194e46ce0e995082ac49926f1423c1f058534d2aaa9"}, - {file = "rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3841f66c1ffdc6cebce8aed64e36db71466f1dc23c0d9a5592e2a782a3042c79"}, - {file = "rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = 
"sha256:42894616da0fc0dcb2ec08a77896c3f56e9cb2f4b66acd76fc8992c3557ceb1c"}, - {file = "rpds_py-0.27.0-cp313-cp313t-win32.whl", hash = "sha256:b1fef1f13c842a39a03409e30ca0bf87b39a1e2a305a9924deadb75a43105d23"}, - {file = "rpds_py-0.27.0-cp313-cp313t-win_amd64.whl", hash = "sha256:183f5e221ba3e283cd36fdfbe311d95cd87699a083330b4f792543987167eff1"}, - {file = "rpds_py-0.27.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:f3cd110e02c5bf17d8fb562f6c9df5c20e73029d587cf8602a2da6c5ef1e32cb"}, - {file = "rpds_py-0.27.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8d0e09cf4863c74106b5265c2c310f36146e2b445ff7b3018a56799f28f39f6f"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f689ab822f9b5eb6dfc69893b4b9366db1d2420f7db1f6a2adf2a9ca15ad64"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e36c80c49853b3ffda7aa1831bf175c13356b210c73128c861f3aa93c3cc4015"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6de6a7f622860af0146cb9ee148682ff4d0cea0b8fd3ad51ce4d40efb2f061d0"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4045e2fc4b37ec4b48e8907a5819bdd3380708c139d7cc358f03a3653abedb89"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da162b718b12c4219eeeeb68a5b7552fbc7aadedf2efee440f88b9c0e54b45d"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:0665be515767dc727ffa5f74bd2ef60b0ff85dad6bb8f50d91eaa6b5fb226f51"}, - {file = "rpds_py-0.27.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:203f581accef67300a942e49a37d74c12ceeef4514874c7cede21b012613ca2c"}, - {file = "rpds_py-0.27.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7873b65686a6471c0037139aa000d23fe94628e0daaa27b6e40607c90e3f5ec4"}, - {file = 
"rpds_py-0.27.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:249ab91ceaa6b41abc5f19513cb95b45c6f956f6b89f1fe3d99c81255a849f9e"}, - {file = "rpds_py-0.27.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d2f184336bc1d6abfaaa1262ed42739c3789b1e3a65a29916a615307d22ffd2e"}, - {file = "rpds_py-0.27.0-cp314-cp314-win32.whl", hash = "sha256:d3c622c39f04d5751408f5b801ecb527e6e0a471b367f420a877f7a660d583f6"}, - {file = "rpds_py-0.27.0-cp314-cp314-win_amd64.whl", hash = "sha256:cf824aceaeffff029ccfba0da637d432ca71ab21f13e7f6f5179cd88ebc77a8a"}, - {file = "rpds_py-0.27.0-cp314-cp314-win_arm64.whl", hash = "sha256:86aca1616922b40d8ac1b3073a1ead4255a2f13405e5700c01f7c8d29a03972d"}, - {file = "rpds_py-0.27.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:341d8acb6724c0c17bdf714319c393bb27f6d23d39bc74f94221b3e59fc31828"}, - {file = "rpds_py-0.27.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6b96b0b784fe5fd03beffff2b1533dc0d85e92bab8d1b2c24ef3a5dc8fac5669"}, - {file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c431bfb91478d7cbe368d0a699978050d3b112d7f1d440a41e90faa325557fd"}, - {file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20e222a44ae9f507d0f2678ee3dd0c45ec1e930f6875d99b8459631c24058aec"}, - {file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:184f0d7b342967f6cda94a07d0e1fae177d11d0b8f17d73e06e36ac02889f303"}, - {file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a00c91104c173c9043bc46f7b30ee5e6d2f6b1149f11f545580f5d6fdff42c0b"}, - {file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a37dd208f0d658e0487522078b1ed68cd6bce20ef4b5a915d2809b9094b410"}, - {file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:92f3b3ec3e6008a1fe00b7c0946a170f161ac00645cde35e3c9a68c2475e8156"}, - 
{file = "rpds_py-0.27.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1b3db5fae5cbce2131b7420a3f83553d4d89514c03d67804ced36161fe8b6b2"}, - {file = "rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5355527adaa713ab693cbce7c1e0ec71682f599f61b128cf19d07e5c13c9b1f1"}, - {file = "rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fcc01c57ce6e70b728af02b2401c5bc853a9e14eb07deda30624374f0aebfe42"}, - {file = "rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3001013dae10f806380ba739d40dee11db1ecb91684febb8406a87c2ded23dae"}, - {file = "rpds_py-0.27.0-cp314-cp314t-win32.whl", hash = "sha256:0f401c369186a5743694dd9fc08cba66cf70908757552e1f714bfc5219c655b5"}, - {file = "rpds_py-0.27.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8a1dca5507fa1337f75dcd5070218b20bc68cf8844271c923c1b79dfcbc20391"}, - {file = "rpds_py-0.27.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e0d7151a1bd5d0a203a5008fc4ae51a159a610cb82ab0a9b2c4d80241745582e"}, - {file = "rpds_py-0.27.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42ccc57ff99166a55a59d8c7d14f1a357b7749f9ed3584df74053fd098243451"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e377e4cf8795cdbdff75b8f0223d7b6c68ff4fef36799d88ccf3a995a91c0112"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:79af163a4b40bbd8cfd7ca86ec8b54b81121d3b213b4435ea27d6568bcba3e9d"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2eff8ee57c5996b0d2a07c3601fb4ce5fbc37547344a26945dd9e5cbd1ed27a"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7cf9bc4508efb18d8dff6934b602324eb9f8c6644749627ce001d6f38a490889"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:05284439ebe7d9f5f5a668d4d8a0a1d851d16f7d47c78e1fab968c8ad30cab04"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_31_riscv64.whl", hash = "sha256:1321bce595ad70e80f97f998db37356b2e22cf98094eba6fe91782e626da2f71"}, - {file = "rpds_py-0.27.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:737005088449ddd3b3df5a95476ee1c2c5c669f5c30eed909548a92939c0e12d"}, - {file = "rpds_py-0.27.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9b2a4e17bfd68536c3b801800941c95a1d4a06e3cada11c146093ba939d9638d"}, - {file = "rpds_py-0.27.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dc6b0d5a1ea0318ef2def2b6a55dccf1dcaf77d605672347271ed7b829860765"}, - {file = "rpds_py-0.27.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4c3f8a0d4802df34fcdbeb3dfe3a4d8c9a530baea8fafdf80816fcaac5379d83"}, - {file = "rpds_py-0.27.0-cp39-cp39-win32.whl", hash = "sha256:699c346abc73993962cac7bb4f02f58e438840fa5458a048d3a178a7a670ba86"}, - {file = "rpds_py-0.27.0-cp39-cp39-win_amd64.whl", hash = "sha256:be806e2961cd390a89d6c3ce8c2ae34271cfcd05660f716257838bb560f1c3b6"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:46f48482c1a4748ab2773f75fffbdd1951eb59794e32788834b945da857c47a8"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:419dd9c98bcc9fb0242be89e0c6e922df333b975d4268faa90d58499fd9c9ebe"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55d42a0ef2bdf6bc81e1cc2d49d12460f63c6ae1423c4f4851b828e454ccf6f1"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e39169ac6aae06dd79c07c8a69d9da867cef6a6d7883a0186b46bb46ccfb0c3"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:935afcdea4751b0ac918047a2df3f720212892347767aea28f5b3bf7be4f27c0"}, - {file = 
"rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8de567dec6d451649a781633d36f5c7501711adee329d76c095be2178855b042"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:555ed147cbe8c8f76e72a4c6cd3b7b761cbf9987891b9448808148204aed74a5"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:d2cc2b34f9e1d31ce255174da82902ad75bd7c0d88a33df54a77a22f2ef421ee"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cb0702c12983be3b2fab98ead349ac63a98216d28dda6f518f52da5498a27a1b"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:ba783541be46f27c8faea5a6645e193943c17ea2f0ffe593639d906a327a9bcc"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:2406d034635d1497c596c40c85f86ecf2bf9611c1df73d14078af8444fe48031"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dea0808153f1fbbad772669d906cddd92100277533a03845de6893cadeffc8be"}, - {file = "rpds_py-0.27.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d2a81bdcfde4245468f7030a75a37d50400ac2455c3a4819d9d550c937f90ab5"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e6491658dd2569f05860bad645569145c8626ac231877b0fb2d5f9bcb7054089"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:bec77545d188f8bdd29d42bccb9191682a46fb2e655e3d1fb446d47c55ac3b8d"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a4aebf8ca02bbb90a9b3e7a463bbf3bee02ab1c446840ca07b1695a68ce424"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44524b96481a4c9b8e6c46d6afe43fa1fb485c261e359fbe32b63ff60e3884d8"}, - {file = 
"rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45d04a73c54b6a5fd2bab91a4b5bc8b426949586e61340e212a8484919183859"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:343cf24de9ed6c728abefc5d5c851d5de06497caa7ac37e5e65dd572921ed1b5"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aed8118ae20515974650d08eb724150dc2e20c2814bcc307089569995e88a14"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:af9d4fd79ee1cc8e7caf693ee02737daabfc0fcf2773ca0a4735b356c8ad6f7c"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f0396e894bd1e66c74ecbc08b4f6a03dc331140942c4b1d345dd131b68574a60"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:59714ab0a5af25d723d8e9816638faf7f4254234decb7d212715c1aa71eee7be"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:88051c3b7d5325409f433c5a40328fcb0685fc04e5db49ff936e910901d10114"}, - {file = "rpds_py-0.27.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:181bc29e59e5e5e6e9d63b143ff4d5191224d355e246b5a48c88ce6b35c4e466"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9ad08547995a57e74fea6abaf5940d399447935faebbd2612b3b0ca6f987946b"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:61490d57e82e23b45c66f96184237994bfafa914433b8cd1a9bb57fecfced59d"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7cf5e726b6fa977e428a61880fb108a62f28b6d0c7ef675b117eaff7076df49"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dc662bc9375a6a394b62dfd331874c434819f10ee3902123200dbcf116963f89"}, - {file = 
"rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:299a245537e697f28a7511d01038c310ac74e8ea213c0019e1fc65f52c0dcb23"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:be3964f7312ea05ed283b20f87cb533fdc555b2e428cc7be64612c0b2124f08c"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33ba649a6e55ae3808e4c39e01580dc9a9b0d5b02e77b66bb86ef117922b1264"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:81f81bbd7cdb4bdc418c09a73809abeda8f263a6bf8f9c7f93ed98b5597af39d"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11e8e28c0ba0373d052818b600474cfee2fafa6c9f36c8587d217b13ee28ca7d"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:e3acb9c16530362aeaef4e84d57db357002dc5cbfac9a23414c3e73c08301ab2"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:2e307cb5f66c59ede95c00e93cd84190a5b7f3533d7953690b2036780622ba81"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:f09c9d4c26fa79c1bad927efb05aca2391350b8e61c38cbc0d7d3c814e463124"}, - {file = "rpds_py-0.27.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:af22763a0a1eff106426a6e1f13c4582e0d0ad89c1493ab6c058236174cd6c6a"}, - {file = "rpds_py-0.27.0.tar.gz", hash = "sha256:8b23cf252f180cda89220b378d917180f29d313cd6a07b2431c0d3b776aae86f"}, + {file = "rpds_py-0.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:68afeec26d42ab3b47e541b272166a0b4400313946871cba3ed3a4fc0cab1cef"}, + {file = "rpds_py-0.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74e5b2f7bb6fa38b1b10546d27acbacf2a022a8b5543efb06cfebc72a59c85be"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9024de74731df54546fab0bfbcdb49fae19159ecaecfc8f37c18d2c7e2c0bd61"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:31d3ebadefcd73b73928ed0b2fd696f7fefda8629229f81929ac9c1854d0cffb"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2e7f8f169d775dd9092a1743768d771f1d1300453ddfe6325ae3ab5332b4657"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d905d16f77eb6ab2e324e09bfa277b4c8e5e6b8a78a3e7ff8f3cdf773b4c013"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50c946f048209e6362e22576baea09193809f87687a95a8db24e5fbdb307b93a"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:3deab27804d65cd8289eb814c2c0e807c4b9d9916c9225e363cb0cf875eb67c1"}, + {file = "rpds_py-0.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8b61097f7488de4be8244c89915da8ed212832ccf1e7c7753a25a394bf9b1f10"}, + {file = "rpds_py-0.27.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:8a3f29aba6e2d7d90528d3c792555a93497fe6538aa65eb675b44505be747808"}, + {file = "rpds_py-0.27.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:dd6cd0485b7d347304067153a6dc1d73f7d4fd995a396ef32a24d24b8ac63ac8"}, + {file = "rpds_py-0.27.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f4461bf931108c9fa226ffb0e257c1b18dc2d44cd72b125bec50ee0ab1248a9"}, + {file = "rpds_py-0.27.1-cp310-cp310-win32.whl", hash = "sha256:ee5422d7fb21f6a00c1901bf6559c49fee13a5159d0288320737bbf6585bd3e4"}, + {file = "rpds_py-0.27.1-cp310-cp310-win_amd64.whl", hash = "sha256:3e039aabf6d5f83c745d5f9a0a381d031e9ed871967c0a5c38d201aca41f3ba1"}, + {file = "rpds_py-0.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:be898f271f851f68b318872ce6ebebbc62f303b654e43bf72683dbdc25b7c881"}, + {file = "rpds_py-0.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:62ac3d4e3e07b58ee0ddecd71d6ce3b1637de2d373501412df395a0ec5f9beb5"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4708c5c0ceb2d034f9991623631d3d23cb16e65c83736ea020cdbe28d57c0a0e"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:abfa1171a9952d2e0002aba2ad3780820b00cc3d9c98c6630f2e93271501f66c"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4b507d19f817ebaca79574b16eb2ae412e5c0835542c93fe9983f1e432aca195"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:168b025f8fd8d8d10957405f3fdcef3dc20f5982d398f90851f4abc58c566c52"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb56c6210ef77caa58e16e8c17d35c63fe3f5b60fd9ba9d424470c3400bcf9ed"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:d252f2d8ca0195faa707f8eb9368955760880b2b42a8ee16d382bf5dd807f89a"}, + {file = "rpds_py-0.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6e5e54da1e74b91dbc7996b56640f79b195d5925c2b78efaa8c5d53e1d88edde"}, + {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ffce0481cc6e95e5b3f0a47ee17ffbd234399e6d532f394c8dce320c3b089c21"}, + {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a205fdfe55c90c2cd8e540ca9ceba65cbe6629b443bc05db1f590a3db8189ff9"}, + {file = "rpds_py-0.27.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:689fb5200a749db0415b092972e8eba85847c23885c8543a8b0f5c009b1a5948"}, + {file = "rpds_py-0.27.1-cp311-cp311-win32.whl", hash = "sha256:3182af66048c00a075010bc7f4860f33913528a4b6fc09094a6e7598e462fe39"}, + {file = "rpds_py-0.27.1-cp311-cp311-win_amd64.whl", hash = "sha256:b4938466c6b257b2f5c4ff98acd8128ec36b5059e5c8f8372d79316b1c36bb15"}, + {file = 
"rpds_py-0.27.1-cp311-cp311-win_arm64.whl", hash = "sha256:2f57af9b4d0793e53266ee4325535a31ba48e2f875da81a9177c9926dfa60746"}, + {file = "rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90"}, + {file = "rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a"}, + {file = "rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444"}, + {file = "rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a"}, + {file = "rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1"}, + {file = "rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998"}, + {file = "rpds_py-0.27.1-cp312-cp312-win32.whl", hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39"}, + {file = "rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594"}, + {file = "rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502"}, + {file = "rpds_py-0.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e4b9fcfbc021633863a37e92571d6f91851fa656f0180246e84cbd8b3f6b329b"}, + {file = "rpds_py-0.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1441811a96eadca93c517d08df75de45e5ffe68aa3089924f963c782c4b898cf"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55266dafa22e672f5a4f65019015f90336ed31c6383bd53f5e7826d21a0e0b83"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78827d7ac08627ea2c8e02c9e5b41180ea5ea1f747e9db0915e3adf36b62dcf"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae92443798a40a92dc5f0b01d8a7c93adde0c4dc965310a29ae7c64d72b9fad2"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c46c9dd2403b66a2a3b9720ec4b74d4ab49d4fabf9f03dfdce2d42af913fe8d0"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2efe4eb1d01b7f5f1939f4ef30ecea6c6b3521eec451fb93191bf84b2a522418"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:15d3b4d83582d10c601f481eca29c3f138d44c92187d197aff663a269197c02d"}, + {file = "rpds_py-0.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4ed2e16abbc982a169d30d1a420274a709949e2cbdef119fe2ec9d870b42f274"}, + {file = 
"rpds_py-0.27.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a75f305c9b013289121ec0f1181931975df78738cdf650093e6b86d74aa7d8dd"}, + {file = "rpds_py-0.27.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:67ce7620704745881a3d4b0ada80ab4d99df390838839921f99e63c474f82cf2"}, + {file = "rpds_py-0.27.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d992ac10eb86d9b6f369647b6a3f412fc0075cfd5d799530e84d335e440a002"}, + {file = "rpds_py-0.27.1-cp313-cp313-win32.whl", hash = "sha256:4f75e4bd8ab8db624e02c8e2fc4063021b58becdbe6df793a8111d9343aec1e3"}, + {file = "rpds_py-0.27.1-cp313-cp313-win_amd64.whl", hash = "sha256:f9025faafc62ed0b75a53e541895ca272815bec18abe2249ff6501c8f2e12b83"}, + {file = "rpds_py-0.27.1-cp313-cp313-win_arm64.whl", hash = "sha256:ed10dc32829e7d222b7d3b93136d25a406ba9788f6a7ebf6809092da1f4d279d"}, + {file = "rpds_py-0.27.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:92022bbbad0d4426e616815b16bc4127f83c9a74940e1ccf3cfe0b387aba0228"}, + {file = "rpds_py-0.27.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:47162fdab9407ec3f160805ac3e154df042e577dd53341745fc7fb3f625e6d92"}, + {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb89bec23fddc489e5d78b550a7b773557c9ab58b7946154a10a6f7a214a48b2"}, + {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e48af21883ded2b3e9eb48cb7880ad8598b31ab752ff3be6457001d78f416723"}, + {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6f5b7bd8e219ed50299e58551a410b64daafb5017d54bbe822e003856f06a802"}, + {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08f1e20bccf73b08d12d804d6e1c22ca5530e71659e6673bce31a6bb71c1e73f"}, + {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dc5dceeaefcc96dc192e3a80bbe1d6c410c469e97bdd47494a7d930987f18b2"}, + {file 
= "rpds_py-0.27.1-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d76f9cc8665acdc0c9177043746775aa7babbf479b5520b78ae4002d889f5c21"}, + {file = "rpds_py-0.27.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:134fae0e36022edad8290a6661edf40c023562964efea0cc0ec7f5d392d2aaef"}, + {file = "rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:eb11a4f1b2b63337cfd3b4d110af778a59aae51c81d195768e353d8b52f88081"}, + {file = "rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:13e608ac9f50a0ed4faec0e90ece76ae33b34c0e8656e3dceb9a7db994c692cd"}, + {file = "rpds_py-0.27.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dd2135527aa40f061350c3f8f89da2644de26cd73e4de458e79606384f4f68e7"}, + {file = "rpds_py-0.27.1-cp313-cp313t-win32.whl", hash = "sha256:3020724ade63fe320a972e2ffd93b5623227e684315adce194941167fee02688"}, + {file = "rpds_py-0.27.1-cp313-cp313t-win_amd64.whl", hash = "sha256:8ee50c3e41739886606388ba3ab3ee2aae9f35fb23f833091833255a31740797"}, + {file = "rpds_py-0.27.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:acb9aafccaae278f449d9c713b64a9e68662e7799dbd5859e2c6b3c67b56d334"}, + {file = "rpds_py-0.27.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:b7fb801aa7f845ddf601c49630deeeccde7ce10065561d92729bfe81bd21fb33"}, + {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe0dd05afb46597b9a2e11c351e5e4283c741237e7f617ffb3252780cca9336a"}, + {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b6dfb0e058adb12d8b1d1b25f686e94ffa65d9995a5157afe99743bf7369d62b"}, + {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed090ccd235f6fa8bb5861684567f0a83e04f52dfc2e5c05f2e4b1309fcf85e7"}, + {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf876e79763eecf3e7356f157540d6a093cef395b65514f17a356f62af6cc136"}, + 
{file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12ed005216a51b1d6e2b02a7bd31885fe317e45897de81d86dcce7d74618ffff"}, + {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:ee4308f409a40e50593c7e3bb8cbe0b4d4c66d1674a316324f0c2f5383b486f9"}, + {file = "rpds_py-0.27.1-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0b08d152555acf1f455154d498ca855618c1378ec810646fcd7c76416ac6dc60"}, + {file = "rpds_py-0.27.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:dce51c828941973a5684d458214d3a36fcd28da3e1875d659388f4f9f12cc33e"}, + {file = "rpds_py-0.27.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c1476d6f29eb81aa4151c9a31219b03f1f798dc43d8af1250a870735516a1212"}, + {file = "rpds_py-0.27.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:3ce0cac322b0d69b63c9cdb895ee1b65805ec9ffad37639f291dd79467bee675"}, + {file = "rpds_py-0.27.1-cp314-cp314-win32.whl", hash = "sha256:dfbfac137d2a3d0725758cd141f878bf4329ba25e34979797c89474a89a8a3a3"}, + {file = "rpds_py-0.27.1-cp314-cp314-win_amd64.whl", hash = "sha256:a6e57b0abfe7cc513450fcf529eb486b6e4d3f8aee83e92eb5f1ef848218d456"}, + {file = "rpds_py-0.27.1-cp314-cp314-win_arm64.whl", hash = "sha256:faf8d146f3d476abfee026c4ae3bdd9ca14236ae4e4c310cbd1cf75ba33d24a3"}, + {file = "rpds_py-0.27.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:ba81d2b56b6d4911ce735aad0a1d4495e808b8ee4dc58715998741a26874e7c2"}, + {file = "rpds_py-0.27.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:84f7d509870098de0e864cad0102711c1e24e9b1a50ee713b65928adb22269e4"}, + {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a9e960fc78fecd1100539f14132425e1d5fe44ecb9239f8f27f079962021523e"}, + {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:62f85b665cedab1a503747617393573995dac4600ff51869d69ad2f39eb5e817"}, + {file = 
"rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fed467af29776f6556250c9ed85ea5a4dd121ab56a5f8b206e3e7a4c551e48ec"}, + {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2729615f9d430af0ae6b36cf042cb55c0936408d543fb691e1a9e36648fd35a"}, + {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b207d881a9aef7ba753d69c123a35d96ca7cb808056998f6b9e8747321f03b8"}, + {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:639fd5efec029f99b79ae47e5d7e00ad8a773da899b6309f6786ecaf22948c48"}, + {file = "rpds_py-0.27.1-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fecc80cb2a90e28af8a9b366edacf33d7a91cbfe4c2c4544ea1246e949cfebeb"}, + {file = "rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:42a89282d711711d0a62d6f57d81aa43a1368686c45bc1c46b7f079d55692734"}, + {file = "rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:cf9931f14223de59551ab9d38ed18d92f14f055a5f78c1d8ad6493f735021bbb"}, + {file = "rpds_py-0.27.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f39f58a27cc6e59f432b568ed8429c7e1641324fbe38131de852cd77b2d534b0"}, + {file = "rpds_py-0.27.1-cp314-cp314t-win32.whl", hash = "sha256:d5fa0ee122dc09e23607a28e6d7b150da16c662e66409bbe85230e4c85bb528a"}, + {file = "rpds_py-0.27.1-cp314-cp314t-win_amd64.whl", hash = "sha256:6567d2bb951e21232c2f660c24cf3470bb96de56cdcb3f071a83feeaff8a2772"}, + {file = "rpds_py-0.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c918c65ec2e42c2a78d19f18c553d77319119bf43aa9e2edf7fb78d624355527"}, + {file = "rpds_py-0.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1fea2b1a922c47c51fd07d656324531adc787e415c8b116530a1d29c0516c62d"}, + {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbf94c58e8e0cd6b6f38d8de67acae41b3a515c26169366ab58bdca4a6883bb8"}, + 
{file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c2a8fed130ce946d5c585eddc7c8eeef0051f58ac80a8ee43bd17835c144c2cc"}, + {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:037a2361db72ee98d829bc2c5b7cc55598ae0a5e0ec1823a56ea99374cfd73c1"}, + {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5281ed1cc1d49882f9997981c88df1a22e140ab41df19071222f7e5fc4e72125"}, + {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fd50659a069c15eef8aa3d64bbef0d69fd27bb4a50c9ab4f17f83a16cbf8905"}, + {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_31_riscv64.whl", hash = "sha256:c4b676c4ae3921649a15d28ed10025548e9b561ded473aa413af749503c6737e"}, + {file = "rpds_py-0.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:079bc583a26db831a985c5257797b2b5d3affb0386e7ff886256762f82113b5e"}, + {file = "rpds_py-0.27.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4e44099bd522cba71a2c6b97f68e19f40e7d85399de899d66cdb67b32d7cb786"}, + {file = "rpds_py-0.27.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e202e6d4188e53c6661af813b46c37ca2c45e497fc558bacc1a7630ec2695aec"}, + {file = "rpds_py-0.27.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f41f814b8eaa48768d1bb551591f6ba45f87ac76899453e8ccd41dba1289b04b"}, + {file = "rpds_py-0.27.1-cp39-cp39-win32.whl", hash = "sha256:9e71f5a087ead99563c11fdaceee83ee982fd39cf67601f4fd66cb386336ee52"}, + {file = "rpds_py-0.27.1-cp39-cp39-win_amd64.whl", hash = "sha256:71108900c9c3c8590697244b9519017a400d9ba26a36c48381b3f64743a44aab"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7ba22cb9693df986033b91ae1d7a979bc399237d45fccf875b76f62bb9e52ddf"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b640501be9288c77738b5492b3fd3abc4ba95c50c2e41273c8a1459f08298d3"}, + {file = 
"rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb08b65b93e0c6dd70aac7f7890a9c0938d5ec71d5cb32d45cf844fb8ae47636"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d7ff07d696a7a38152ebdb8212ca9e5baab56656749f3d6004b34ab726b550b8"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb7c72262deae25366e3b6c0c0ba46007967aea15d1eea746e44ddba8ec58dcc"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b002cab05d6339716b03a4a3a2ce26737f6231d7b523f339fa061d53368c9d8"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23f6b69d1c26c4704fec01311963a41d7de3ee0570a84ebde4d544e5a1859ffc"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:530064db9146b247351f2a0250b8f00b289accea4596a033e94be2389977de71"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7b90b0496570bd6b0321724a330d8b545827c4df2034b6ddfc5f5275f55da2ad"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879b0e14a2da6a1102a3fc8af580fc1ead37e6d6692a781bd8c83da37429b5ab"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:0d807710df3b5faa66c731afa162ea29717ab3be17bdc15f90f2d9f183da4059"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3adc388fc3afb6540aec081fa59e6e0d3908722771aa1e37ffe22b220a436f0b"}, + {file = "rpds_py-0.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:c796c0c1cc68cb08b0284db4229f5af76168172670c74908fdbd4b7d7f515819"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:cdfe4bb2f9fe7458b7453ad3c33e726d6d1c7c0a72960bcc23800d77384e42df"}, + {file = 
"rpds_py-0.27.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:8fabb8fd848a5f75a2324e4a84501ee3a5e3c78d8603f83475441866e60b94a3"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda8719d598f2f7f3e0f885cba8646644b55a187762bec091fa14a2b819746a9"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c64d07e95606ec402a0a1c511fe003873fa6af630bda59bac77fac8b4318ebc"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93a2ed40de81bcff59aabebb626562d48332f3d028ca2036f1d23cbb52750be4"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:387ce8c44ae94e0ec50532d9cb0edce17311024c9794eb196b90e1058aadeb66"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf94f812c95b5e60ebaf8bfb1898a7d7cb9c1af5744d4a67fa47796e0465d4e"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4848ca84d6ded9b58e474dfdbad4b8bfb450344c0551ddc8d958bf4b36aa837c"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2bde09cbcf2248b73c7c323be49b280180ff39fadcfe04e7b6f54a678d02a7cf"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:94c44ee01fd21c9058f124d2d4f0c9dc7634bec93cd4b38eefc385dabe71acbf"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:df8b74962e35c9249425d90144e721eed198e6555a0e22a563d29fe4486b51f6"}, + {file = "rpds_py-0.27.1-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:dc23e6820e3b40847e2f4a7726462ba0cf53089512abe9ee16318c366494c17a"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aa8933159edc50be265ed22b401125c9eebff3171f570258854dbce3ecd55475"}, + {file = 
"rpds_py-0.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a50431bf02583e21bf273c71b89d710e7a710ad5e39c725b14e685610555926f"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78af06ddc7fe5cc0e967085a9115accee665fb912c22a3f54bad70cc65b05fe6"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:70d0738ef8fee13c003b100c2fbd667ec4f133468109b3472d249231108283a3"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2f6fd8a1cea5bbe599b6e78a6e5ee08db434fc8ffea51ff201c8765679698b3"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8177002868d1426305bb5de1e138161c2ec9eb2d939be38291d7c431c4712df8"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:008b839781d6c9bf3b6a8984d1d8e56f0ec46dc56df61fd669c49b58ae800400"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:a55b9132bb1ade6c734ddd2759c8dc132aa63687d259e725221f106b83a0e485"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a46fdec0083a26415f11d5f236b79fa1291c32aaa4a17684d82f7017a1f818b1"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:8a63b640a7845f2bdd232eb0d0a4a2dd939bcdd6c57e6bb134526487f3160ec5"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:7e32721e5d4922deaaf963469d795d5bde6093207c52fec719bd22e5d1bedbc4"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:2c426b99a068601b5f4623573df7a7c3d72e87533a2dd2253353a03e7502566c"}, + {file = "rpds_py-0.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:4fc9b7fe29478824361ead6e14e4f5aed570d477e06088826537e202d25fe859"}, + {file = "rpds_py-0.27.1.tar.gz", hash = 
"sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8"}, ] [[package]] @@ -3845,57 +3914,59 @@ files = [ [[package]] name = "ruff" -version = "0.12.9" +version = "0.12.10" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" groups = ["dev"] files = [ - {file = "ruff-0.12.9-py3-none-linux_armv6l.whl", hash = "sha256:fcebc6c79fcae3f220d05585229463621f5dbf24d79fdc4936d9302e177cfa3e"}, - {file = "ruff-0.12.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aed9d15f8c5755c0e74467731a007fcad41f19bcce41cd75f768bbd687f8535f"}, - {file = "ruff-0.12.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5b15ea354c6ff0d7423814ba6d44be2807644d0c05e9ed60caca87e963e93f70"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d596c2d0393c2502eaabfef723bd74ca35348a8dac4267d18a94910087807c53"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b15599931a1a7a03c388b9c5df1bfa62be7ede6eb7ef753b272381f39c3d0ff"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d02faa2977fb6f3f32ddb7828e212b7dd499c59eb896ae6c03ea5c303575756"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:17d5b6b0b3a25259b69ebcba87908496e6830e03acfb929ef9fd4c58675fa2ea"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72db7521860e246adbb43f6ef464dd2a532ef2ef1f5dd0d470455b8d9f1773e0"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a03242c1522b4e0885af63320ad754d53983c9599157ee33e77d748363c561ce"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fc83e4e9751e6c13b5046d7162f205d0a7bac5840183c5beebf824b08a27340"}, - {file = "ruff-0.12.9-py3-none-manylinux_2_31_riscv64.whl", hash = 
"sha256:881465ed56ba4dd26a691954650de6ad389a2d1fdb130fe51ff18a25639fe4bb"}, - {file = "ruff-0.12.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:43f07a3ccfc62cdb4d3a3348bf0588358a66da756aa113e071b8ca8c3b9826af"}, - {file = "ruff-0.12.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:07adb221c54b6bba24387911e5734357f042e5669fa5718920ee728aba3cbadc"}, - {file = "ruff-0.12.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f5cd34fabfdea3933ab85d72359f118035882a01bff15bd1d2b15261d85d5f66"}, - {file = "ruff-0.12.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:f6be1d2ca0686c54564da8e7ee9e25f93bdd6868263805f8c0b8fc6a449db6d7"}, - {file = "ruff-0.12.9-py3-none-win32.whl", hash = "sha256:cc7a37bd2509974379d0115cc5608a1a4a6c4bff1b452ea69db83c8855d53f93"}, - {file = "ruff-0.12.9-py3-none-win_amd64.whl", hash = "sha256:6fb15b1977309741d7d098c8a3cb7a30bc112760a00fb6efb7abc85f00ba5908"}, - {file = "ruff-0.12.9-py3-none-win_arm64.whl", hash = "sha256:63c8c819739d86b96d500cce885956a1a48ab056bbcbc61b747ad494b2485089"}, - {file = "ruff-0.12.9.tar.gz", hash = "sha256:fbd94b2e3c623f659962934e52c2bea6fc6da11f667a427a368adaf3af2c866a"}, + {file = "ruff-0.12.10-py3-none-linux_armv6l.whl", hash = "sha256:8b593cb0fb55cc8692dac7b06deb29afda78c721c7ccfed22db941201b7b8f7b"}, + {file = "ruff-0.12.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:ebb7333a45d56efc7c110a46a69a1b32365d5c5161e7244aaf3aa20ce62399c1"}, + {file = "ruff-0.12.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d59e58586829f8e4a9920788f6efba97a13d1fa320b047814e8afede381c6839"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:822d9677b560f1fdeab69b89d1f444bf5459da4aa04e06e766cf0121771ab844"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:37b4a64f4062a50c75019c61c7017ff598cb444984b638511f48539d3a1c98db"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:2c6f4064c69d2542029b2a61d39920c85240c39837599d7f2e32e80d36401d6e"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:059e863ea3a9ade41407ad71c1de2badfbe01539117f38f763ba42a1206f7559"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1bef6161e297c68908b7218fa6e0e93e99a286e5ed9653d4be71e687dff101cf"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4f1345fbf8fb0531cd722285b5f15af49b2932742fc96b633e883da8d841896b"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f68433c4fbc63efbfa3ba5db31727db229fa4e61000f452c540474b03de52a9"}, + {file = "ruff-0.12.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:141ce3d88803c625257b8a6debf4a0473eb6eed9643a6189b68838b43e78165a"}, + {file = "ruff-0.12.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f3fc21178cd44c98142ae7590f42ddcb587b8e09a3b849cbc84edb62ee95de60"}, + {file = "ruff-0.12.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:7d1a4e0bdfafcd2e3e235ecf50bf0176f74dd37902f241588ae1f6c827a36c56"}, + {file = "ruff-0.12.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:e67d96827854f50b9e3e8327b031647e7bcc090dbe7bb11101a81a3a2cbf1cc9"}, + {file = "ruff-0.12.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ae479e1a18b439c59138f066ae79cc0f3ee250712a873d00dbafadaad9481e5b"}, + {file = "ruff-0.12.10-py3-none-win32.whl", hash = "sha256:9de785e95dc2f09846c5e6e1d3a3d32ecd0b283a979898ad427a9be7be22b266"}, + {file = "ruff-0.12.10-py3-none-win_amd64.whl", hash = "sha256:7837eca8787f076f67aba2ca559cefd9c5cbc3a9852fd66186f4201b87c1563e"}, + {file = "ruff-0.12.10-py3-none-win_arm64.whl", hash = "sha256:cc138cc06ed9d4bfa9d667a65af7172b47840e1a98b02ce7011c391e54635ffc"}, + {file = "ruff-0.12.10.tar.gz", hash = "sha256:189ab65149d11ea69a2d775343adf5f49bb2426fc4780f65ee33b423ad2e47f9"}, ] [[package]] name = "safety" -version = 
"3.2.3" +version = "3.2.9" description = "Checks installed dependencies for known vulnerabilities and licenses." optional = false python-versions = ">=3.7" groups = ["security"] files = [ - {file = "safety-3.2.3-py3-none-any.whl", hash = "sha256:cda1e91749f610337a18b7f21f78267c127e44ebbbbcbbd419c83284279a5024"}, - {file = "safety-3.2.3.tar.gz", hash = "sha256:414154934f1727daf8a6473493944fecb380540c3f00875dc1ae377382f7d83f"}, + {file = "safety-3.2.9-py3-none-any.whl", hash = "sha256:5e199c057550dc6146c081084274279dfb98c17735193b028db09a55ea508f1a"}, + {file = "safety-3.2.9.tar.gz", hash = "sha256:494bea752366161ac9e0742033d2a82e4dc51d7c788be42e0ecf5f3ef36b8071"}, ] [package.dependencies] Authlib = ">=1.2.0" Click = ">=8.0.2" dparse = ">=0.6.4b0" +filelock = ">=3.12.2,<3.13.0" jinja2 = ">=3.1.0" marshmallow = ">=3.15.0" packaging = ">=21.0" +psutil = ">=6.0.0,<6.1.0" pydantic = ">=1.10.12" requests = "*" rich = "*" "ruamel.yaml" = ">=0.17.21" -safety-schemas = ">=0.0.2" +safety-schemas = ">=0.0.4" setuptools = ">=65.5.1" typer = "*" typing-extensions = ">=4.7.1" @@ -4096,14 +4167,14 @@ files = [ [[package]] name = "soupsieve" -version = "2.7" +version = "2.8" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main", "jupyter"] files = [ - {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, - {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, + {file = "soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c"}, + {file = "soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f"}, ] [[package]] @@ -4224,14 +4295,14 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] [[package]] name = "starlette" -version = "0.47.2" +version = "0.47.3" description = "The little ASGI library that shines." optional = false python-versions = ">=3.9" groups = ["api"] files = [ - {file = "starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b"}, - {file = "starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8"}, + {file = "starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51"}, + {file = "starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9"}, ] [package.dependencies] @@ -4243,19 +4314,16 @@ full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart [[package]] name = "stevedore" -version = "5.4.1" +version = "5.5.0" description = "Manage dynamic plugins for Python applications" optional = false python-versions = ">=3.9" groups = ["security"] files = [ - {file = "stevedore-5.4.1-py3-none-any.whl", hash = "sha256:d10a31c7b86cba16c1f6e8d15416955fc797052351a56af15e608ad20811fcfe"}, - {file = "stevedore-5.4.1.tar.gz", hash = "sha256:3135b5ae50fe12816ef291baff420acb727fcd356106e3e9cbfa9e5985cd6f4b"}, + {file = 
"stevedore-5.5.0-py3-none-any.whl", hash = "sha256:18363d4d268181e8e8452e71a38cd77630f345b2ef6b4a8d5614dac5ee0d18cf"}, + {file = "stevedore-5.5.0.tar.gz", hash = "sha256:d31496a4f4df9825e1a1e4f1f74d19abb0154aff311c3b376fcc89dae8fccd73"}, ] -[package.dependencies] -pbr = ">=2.0.0" - [[package]] name = "terminado" version = "0.18.1" @@ -4297,26 +4365,38 @@ webencodings = ">=0.4" doc = ["sphinx", "sphinx_rtd_theme"] test = ["pytest", "ruff"] +[[package]] +name = "toolz" +version = "1.0.0" +description = "List processing tools and functional utilities" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236"}, + {file = "toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02"}, +] + [[package]] name = "tornado" -version = "6.5.1" +version = "6.5.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
optional = false python-versions = ">=3.9" groups = ["main", "jupyter"] files = [ - {file = "tornado-6.5.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d50065ba7fd11d3bd41bcad0825227cc9a95154bad83239357094c36708001f7"}, - {file = "tornado-6.5.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9e9ca370f717997cb85606d074b0e5b247282cf5e2e1611568b8821afe0342d6"}, - {file = "tornado-6.5.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b77e9dfa7ed69754a54c89d82ef746398be82f749df69c4d3abe75c4d1ff4888"}, - {file = "tornado-6.5.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253b76040ee3bab8bcf7ba9feb136436a3787208717a1fb9f2c16b744fba7331"}, - {file = "tornado-6.5.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:308473f4cc5a76227157cdf904de33ac268af770b2c5f05ca6c1161d82fdd95e"}, - {file = "tornado-6.5.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:caec6314ce8a81cf69bd89909f4b633b9f523834dc1a352021775d45e51d9401"}, - {file = "tornado-6.5.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:13ce6e3396c24e2808774741331638ee6c2f50b114b97a55c5b442df65fd9692"}, - {file = "tornado-6.5.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5cae6145f4cdf5ab24744526cc0f55a17d76f02c98f4cff9daa08ae9a217448a"}, - {file = "tornado-6.5.1-cp39-abi3-win32.whl", hash = "sha256:e0a36e1bc684dca10b1aa75a31df8bdfed656831489bc1e6a6ebed05dc1ec365"}, - {file = "tornado-6.5.1-cp39-abi3-win_amd64.whl", hash = "sha256:908e7d64567cecd4c2b458075589a775063453aeb1d2a1853eedb806922f568b"}, - {file = "tornado-6.5.1-cp39-abi3-win_arm64.whl", hash = "sha256:02420a0eb7bf617257b9935e2b754d1b63897525d8a289c9d65690d580b4dcf7"}, - {file = "tornado-6.5.1.tar.gz", hash = "sha256:84ceece391e8eb9b2b95578db65e920d2a61070260594819589609ba9bc6308c"}, + {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_universal2.whl", hash = 
"sha256:2436822940d37cde62771cff8774f4f00b3c8024fe482e16ca8387b8a2724db6"}, + {file = "tornado-6.5.2-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:583a52c7aa94ee046854ba81d9ebb6c81ec0fd30386d96f7640c96dad45a03ef"}, + {file = "tornado-6.5.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0fe179f28d597deab2842b86ed4060deec7388f1fd9c1b4a41adf8af058907e"}, + {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b186e85d1e3536d69583d2298423744740986018e393d0321df7340e71898882"}, + {file = "tornado-6.5.2-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e792706668c87709709c18b353da1f7662317b563ff69f00bab83595940c7108"}, + {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:06ceb1300fd70cb20e43b1ad8aaee0266e69e7ced38fa910ad2e03285009ce7c"}, + {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:74db443e0f5251be86cbf37929f84d8c20c27a355dd452a5cfa2aada0d001ec4"}, + {file = "tornado-6.5.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b5e735ab2889d7ed33b32a459cac490eda71a1ba6857b0118de476ab6c366c04"}, + {file = "tornado-6.5.2-cp39-abi3-win32.whl", hash = "sha256:c6f29e94d9b37a95013bb669616352ddb82e3bfe8326fccee50583caebc8a5f0"}, + {file = "tornado-6.5.2-cp39-abi3-win_amd64.whl", hash = "sha256:e56a5af51cc30dd2cae649429af65ca2f6571da29504a07995175df14c18f35f"}, + {file = "tornado-6.5.2-cp39-abi3-win_arm64.whl", hash = "sha256:d6c33dc3672e3a1f3618eb63b7ef4683a7688e7b9e6e8f0d9aa5726360a004af"}, + {file = "tornado-6.5.2.tar.gz", hash = "sha256:ab53c8f9a0fa351e2c0741284e06c7a45da86afb544133201c5cc8578eb076a0"}, ] markers = {main = "sys_platform != \"emscripten\""} @@ -4356,26 +4436,26 @@ typing-extensions = ">=3.7.4.3" [[package]] name = "types-python-dateutil" -version = "2.9.0.20250809" +version = "2.9.0.20250822" description = "Typing stubs for python-dateutil" 
optional = false python-versions = ">=3.9" groups = ["jupyter"] files = [ - {file = "types_python_dateutil-2.9.0.20250809-py3-none-any.whl", hash = "sha256:768890cac4f2d7fd9e0feb6f3217fce2abbfdfc0cadd38d11fba325a815e4b9f"}, - {file = "types_python_dateutil-2.9.0.20250809.tar.gz", hash = "sha256:69cbf8d15ef7a75c3801d65d63466e46ac25a0baa678d89d0a137fc31a608cc1"}, + {file = "types_python_dateutil-2.9.0.20250822-py3-none-any.whl", hash = "sha256:849d52b737e10a6dc6621d2bd7940ec7c65fcb69e6aa2882acf4e56b2b508ddc"}, + {file = "types_python_dateutil-2.9.0.20250822.tar.gz", hash = "sha256:84c92c34bd8e68b117bff742bc00b692a1e8531262d4507b33afcc9f7716cd53"}, ] [[package]] name = "typing-extensions" -version = "4.14.1" +version = "4.15.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" groups = ["main", "api", "jupyter", "security"] files = [ - {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, - {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] [[package]] @@ -4459,14 +4539,14 @@ standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3) [[package]] name = "virtualenv" -version = "20.33.1" +version = "20.34.0" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" groups = ["dev"] files = [ - {file = "virtualenv-20.33.1-py3-none-any.whl", hash = "sha256:07c19bc66c11acab6a5958b815cbcee30891cd1c2ccf53785a28651a0d8d8a67"}, - {file = "virtualenv-20.33.1.tar.gz", hash = 
"sha256:1b44478d9e261b3fb8baa5e74a0ca3bc0e05f21aa36167bf9cbf850e542765b8"}, + {file = "virtualenv-20.34.0-py3-none-any.whl", hash = "sha256:341f5afa7eee943e4984a9207c025feedd768baff6753cd660c857ceb3e36026"}, + {file = "virtualenv-20.34.0.tar.gz", hash = "sha256:44815b2c9dee7ed86e387b842a84f20b93f7f417f95886ca1996a72a4138eb1a"}, ] [package.dependencies] @@ -4664,7 +4744,28 @@ websockets = ">=13.0" nospam = ["requests_cache (>=1.0)", "requests_ratelimiter (>=0.3.1)"] repair = ["scipy (>=1.6.3)"] +[[package]] +name = "yfinance-cache" +version = "0.7.13" +description = "Smart caching wrapper for 'yfinance' module" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "yfinance_cache-0.7.13-py3-none-any.whl", hash = "sha256:89e5bb1a47b66eb4449d564386febcaa74d47c8f1fa13b20c279d1c24933c626"}, + {file = "yfinance_cache-0.7.13.tar.gz", hash = "sha256:eb0286dce8322e8905c0e215c8f221ba36592844dd9318899c4d72ac075353db"}, +] + +[package.dependencies] +exchange_calendars = ">=4.10" +numpy = ">=1.26" +pandas = ">=1.5" +platformdirs = "*" +pulp = "*" +scipy = ">=1.6.3" +yfinance = ">=0.2.57" + [metadata] lock-version = "2.1" python-versions = ">=3.12,<4.0" -content-hash = "2111919c400251323be36fff40337da2740929253577c890477bebafb0e9e119" +content-hash = "e6a52cb8ab7e68757c57176aa86996feb45551a5f360de5aa1cd99dac197eef1" diff --git a/poetry.toml b/poetry.toml deleted file mode 100644 index ab1033b..0000000 --- a/poetry.toml +++ /dev/null @@ -1,2 +0,0 @@ -[virtualenvs] -in-project = true diff --git a/pyproject.toml b/pyproject.toml index 498fa33..f15a6e6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,6 +20,7 @@ sqlalchemy = "^2.0" psycopg2-binary = "^2.9" # Data sources yfinance = "^0.2" +yfinance-cache = "^0.7.13" requests = "^2.32.4" # Web scraping beautifulsoup4 = "^4.13.4" diff --git a/scripts/build_tailwind.sh b/scripts/build_tailwind.sh new file mode 100755 index 0000000..cb7b43f --- /dev/null +++ b/scripts/build_tailwind.sh @@ -0,0 +1,18 
@@ +#!/usr/bin/env bash +set -euo pipefail + +INPUT="src/reporting/tailwind.input.css" +CONFIG="tailwind.config.js" +OUTPUT_DIR="exports/reports/assets" +OUTPUT="$OUTPUT_DIR/tailwind.min.css" + +if ! command -v npx >/dev/null 2>&1; then + echo "npx is required. Please install Node.js (>=18) and try again." >&2 + exit 1 +fi + +mkdir -p "$OUTPUT_DIR" + +echo "Building Tailwind CSS → $OUTPUT" +npx tailwindcss -c "$CONFIG" -i "$INPUT" -o "$OUTPUT" --minify +echo "Done. Set TAILWIND_CSS_HREF=$OUTPUT to use the local file." diff --git a/scripts/check_data_ranges.py b/scripts/check_data_ranges.py new file mode 100644 index 0000000..85faaa4 --- /dev/null +++ b/scripts/check_data_ranges.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +""" +Check available OHLC data ranges for given symbols using UnifiedDataManager. + +Prints symbol, number of rows, first date, last date (UTC). +""" + +from __future__ import annotations + +import datetime + +try: + from src.core.data_manager import UnifiedDataManager +except Exception as e: + raise SystemExit(f"Could not import UnifiedDataManager: {e}") + +SYMBOLS = ["AGG", "HYG", "TLT", "JPST", "EMB"] + + +def fmt(dt): + if dt is None: + return "None" + try: + return dt.tz_convert("UTC").isoformat() + except Exception: + try: + return dt.isoformat() + except Exception: + return str(dt) + + +def main(): + dm = UnifiedDataManager() + for s in SYMBOLS: + try: + print(f"--- {s} ---") + # Request wide range to emulate 'max' (use aware date for lint) + today_iso = datetime.datetime.now(datetime.timezone.utc).date().isoformat() + data = dm.get_data(s, "1900-01-01", today_iso, "1d") + if data is None: + print("No data returned") + continue + # Ensure index is datetime + idx = data.index + if len(idx) == 0: + print("Empty index") + continue + first = idx[0] + last = idx[-1] + print("rows:", len(data)) + print("first:", fmt(first)) + print("last:", fmt(last)) + except Exception as e: + print("Error fetching", s, "->", e) + + +if __name__ == "__main__": + 
main() diff --git a/scripts/compare_direct_db.py b/scripts/compare_direct_db.py new file mode 100644 index 0000000..86e7d82 --- /dev/null +++ b/scripts/compare_direct_db.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +""" +Compare direct backtesting library results (exports/direct_portfolio_comparison.json) +with BestStrategy rows in the database. + +Produces exports/compare_direct_db_results.json and prints a summary. + +Usage: + python3 scripts/compare_direct_db.py --direct exports/direct_portfolio_comparison.json +""" + +from __future__ import annotations + +import argparse +import json +from pathlib import Path + +from src.database import unified_models as um + + +def parse_args(): + p = argparse.ArgumentParser() + p.add_argument( + "--direct", + required=True, + help="Path to direct backtest JSON (exports/direct_portfolio_comparison.json)", + ) + return p.parse_args() + + +def normalize_interval_name(interval: str) -> str: + """Normalize interval strings to the format stored in DB/timeframe keys.""" + # Accept both "1min" and "1m" etc. We will normalize common variants to short form used across project. 
+ mapping = { + "1min": "1m", + "2min": "2m", + "5min": "5m", + "15min": "15m", + "30min": "30m", + "60min": "60m", + "1h": "1h", + "4h": "4h", + "1d": "1d", + "5d": "5d", + "1wk": "1wk", + "1mo": "1mo", + "3mo": "3mo", + } + return mapping.get(interval, interval) + + +def main(): + args = parse_args() + direct_path = Path(args.direct) + if not direct_path.exists(): + print("Direct results file not found:", direct_path) + return + + with direct_path.open() as f: + data = json.load(f) + + sess = um.Session() + results = {} + summary = {"total": 0, "matched": 0, "missing_db": 0, "mismatched": 0} + + try: + for symbol, intervals in data.items(): + results.setdefault(symbol, {}) + for interval, runs in intervals.items(): + summary["total"] += 1 + norm_interval = normalize_interval_name(interval) + + # runs is a list of dicts: {"strategy":..., "stats":..., "error":...} + # Find best strategy by native Sortino Ratio (highest). Ignore errored runs. + candidates = [ + r + for r in runs + if (r.get("stats") or {}).get("Sortino Ratio") is not None + ] + if not candidates: + results[symbol][interval] = { + "direct_best": None, + "direct_sortino": None, + "db_best": None, + "db_sortino": None, + "match": False, + "note": "no_valid_direct_metrics", + } + summary["mismatched"] += 1 + continue + + best_direct = max( + candidates, + key=lambda r: float( + (r.get("stats") or {}).get("Sortino Ratio") or float("-inf") + ), + ) + direct_best_name = best_direct.get("strategy") + try: + direct_sortino = float( + (best_direct.get("stats") or {}).get("Sortino Ratio") or 0 + ) + except Exception: + direct_sortino = 0.0 + + # Query DB for BestStrategy for this symbol/timeframe + db_row = ( + sess.query(um.BestStrategy) + .filter( + um.BestStrategy.symbol == symbol, + um.BestStrategy.timeframe == norm_interval, + ) + .first() + ) + + if not db_row: + results[symbol][interval] = { + "direct_best": direct_best_name, + "direct_sortino": direct_sortino, + "db_best": None, + "db_sortino": None, + 
"match": False, + "note": "no_db_row", + } + summary["missing_db"] += 1 + continue + + db_best = db_row.strategy + db_sortino = float(getattr(db_row, "sortino_ratio", 0) or 0) + + match = str(db_best).strip() == str(direct_best_name).strip() + if match: + summary["matched"] += 1 + else: + summary["mismatched"] += 1 + + results[symbol][interval] = { + "direct_best": direct_best_name, + "direct_sortino": direct_sortino, + "db_best": db_best, + "db_sortino": db_sortino, + "match": match, + "note": None, + } + + finally: + sess.close() + + out_path = Path("exports") / "compare_direct_db_results.json" + out_path.parent.mkdir(parents=True, exist_ok=True) + with out_path.open("w") as f: + json.dump({"summary": summary, "results": results}, f, indent=2, default=str) + + print("Comparison complete.") + print("Summary:", summary) + print("Detailed results written to:", out_path) + + +if __name__ == "__main__": + main() diff --git a/scripts/compare_json_db.py b/scripts/compare_json_db.py new file mode 100644 index 0000000..e809128 --- /dev/null +++ b/scripts/compare_json_db.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +""" +Compare exports/comparison_sample_Q3_2025.json best strategy (rank 1) +with BestStrategy rows in the database (timeframe=1d). + +Outputs results to exports/compare_json_db_results.json and prints a summary to stdout. 
+""" + +from __future__ import annotations + +import json +from pathlib import Path + +from src.database import get_db_session +from src.database.models import BestStrategy + +INPUT = Path("exports/comparison_sample_Q3_2025.json") +OUTPUT = Path("exports/compare_json_db_results.json") +TIMEFRAME = "1d" + + +def main(): + if not INPUT.exists(): + print(f"Input file not found: {INPUT}") + return + + with INPUT.open() as f: + data = json.load(f) + + session = get_db_session() + results = {} + summary = {"total": 0, "matched": 0, "missing_db": 0, "mismatched": 0} + + try: + for symbol, symbol_data in data.items(): + summary["total"] += 1 + # Prefer explicit best_strategy field if present + json_best = None + json_sortino = None + if symbol_data.get("best_strategy"): + json_best = symbol_data["best_strategy"].get("strategy") + json_sortino = ( + symbol_data["best_strategy"].get("metrics", {}).get("sortino_ratio") + ) + else: + # Fallback to results array where rank==1 + for r in symbol_data.get("results", []): + if r.get("rank") == 1: + json_best = r.get("strategy") + json_sortino = r.get("metrics", {}).get("sortino_ratio") + break + + db_row = ( + session.query(BestStrategy) + .filter_by(symbol=symbol, timeframe=TIMEFRAME) + .first() + ) + + if not db_row: + results[symbol] = { + "json_best": json_best, + "json_sortino": json_sortino, + "db_best": None, + "db_sortino": None, + "match": False, + "note": "no_db_row", + } + summary["missing_db"] += 1 + continue + + db_best = db_row.strategy + db_sortino = float(getattr(db_row, "sortino_ratio", 0) or 0) + + match = str(db_best).strip() == str(json_best).strip() + + if match: + summary["matched"] += 1 + else: + summary["mismatched"] += 1 + + results[symbol] = { + "json_best": json_best, + "json_sortino": json_sortino, + "db_best": db_best, + "db_sortino": db_sortino, + "match": match, + "note": None, + } + + finally: + session.close() + + OUTPUT.parent.mkdir(parents=True, exist_ok=True) + with OUTPUT.open("w") as f: + 
json.dump({"summary": summary, "results": results}, f, indent=2, default=str) + + print("Comparison complete.") + print("Summary:", summary) + print(f"Detailed results written to: {OUTPUT}") + + +if __name__ == "__main__": + main() diff --git a/scripts/compare_unified_vs_direct.py b/scripts/compare_unified_vs_direct.py new file mode 100644 index 0000000..674a49f --- /dev/null +++ b/scripts/compare_unified_vs_direct.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +""" +Compare DB-backed (unified) best-strategies CSV with direct backtests. + +Reads: exports/csv/2025/Q3/bonds_collection_best_strategies_Q3_2025.csv +Writes: exports/csv/compare_unified_vs_direct_bonds_Q3_2025.csv + +This script calls src.core.direct_backtest.run_direct_backtest for each symbol/strategy/timeframe +and compares key metrics (sortino_ratio, total_return). +""" + +from __future__ import annotations + +import csv +import datetime +import traceback +from pathlib import Path +from typing import Optional + +INPUT_CSV = Path("exports/csv/2025/Q3/bonds_collection_best_strategies_Q3_2025.csv") +OUT_CSV = Path("exports/csv/compare_unified_vs_direct_bonds_Q3_2025.csv") + +# Use direct backtest function from the project +try: + from src.core.direct_backtest import run_direct_backtest +except Exception as e: + raise RuntimeError(f"Could not import run_direct_backtest: {e}") from e + + +def as_float(v: Optional[str]) -> Optional[float]: + try: + if v is None or str(v).strip() == "": + return None + return float(v) + except Exception: + return None + + +def main(): + if not INPUT_CSV.exists(): + raise SystemExit(f"Input CSV not found: {INPUT_CSV}") + + today = datetime.datetime.now(datetime.timezone.utc).date().isoformat() + # Use a very wide start to emulate 'max'; data manager will clamp to available history. 
+ start_date = "1900-01-01" + end_date = today + + with INPUT_CSV.open() as fh: + reader = csv.DictReader(fh) + rows = list(reader) + + out_rows = [] + total = len(rows) + succeeded = 0 + failed = 0 + + for i, r in enumerate(rows, start=1): + asset = r.get("Asset") or r.get("symbol") or r.get("Asset") + strategy = r.get("Best_Strategy") or r.get("BestStrategy") or "adx" + timeframe = r.get("Best_Timeframe") or r.get("Best_Timeframe") or "1d" + unified_sortino = as_float(r.get("Sortino_Ratio")) + unified_total = as_float(r.get("Total_Return_Pct")) + + print( + f"[{i}/{total}] Running direct backtest for {asset} / {strategy} / {timeframe}" + ) + try: + res = run_direct_backtest( + symbol=str(asset), + strategy_name=str(strategy), + start_date=start_date, + end_date=end_date, + timeframe=str(timeframe), + initial_capital=10000.0, + commission=0.001, + persistence_context=None, + ) + direct_sortino = None + direct_total = None + err = None + try: + native = res.get("bt_results") or {} + v = native.get("Sortino Ratio", None) + direct_sortino = float(v) if v is not None else None + except Exception: + direct_sortino = None + try: + native = res.get("bt_results") or {} + v2 = native.get("Return [%]", None) + direct_total = float(v2) if v2 is not None else None + except Exception: + direct_total = None + + succeeded += 1 + except Exception as e: + err = f"{e}\n{traceback.format_exc()}" + direct_sortino = None + direct_total = None + failed += 1 + + sortino_diff = ( + None + if (unified_sortino is None or direct_sortino is None) + else float(direct_sortino) - float(unified_sortino) + ) + total_diff = ( + None + if (unified_total is None or direct_total is None) + else float(direct_total) - float(unified_total) + ) + + out_rows.append( + { + "Asset": asset, + "Unified_Strategy": strategy, + "Unified_Timeframe": timeframe, + "Unified_Sortino": "" if unified_sortino is None else unified_sortino, + "Unified_TotalReturn": "" if unified_total is None else unified_total, + 
"Direct_Sortino": "" if direct_sortino is None else direct_sortino, + "Direct_TotalReturn": "" if direct_total is None else direct_total, + "Sortino_Diff": "" if sortino_diff is None else sortino_diff, + "TotalReturn_Diff": "" if total_diff is None else total_diff, + "Error": "" if err is None else err, + } + ) + + OUT_CSV.parent.mkdir(parents=True, exist_ok=True) + with OUT_CSV.open("w", newline="") as fh: + fieldnames = [ + "Asset", + "Unified_Strategy", + "Unified_Timeframe", + "Unified_Sortino", + "Unified_TotalReturn", + "Direct_Sortino", + "Direct_TotalReturn", + "Sortino_Diff", + "TotalReturn_Diff", + "Error", + ] + writer = csv.DictWriter(fh, fieldnames=fieldnames) + writer.writeheader() + for r in out_rows: + writer.writerow(r) + + print(f"Done. Total={total} succeeded={succeeded} failed={failed}") + print(f"Wrote comparison CSV to {OUT_CSV}") + + +if __name__ == "__main__": + main() diff --git a/scripts/data_health_report.py b/scripts/data_health_report.py new file mode 100644 index 0000000..f5942ee --- /dev/null +++ b/scripts/data_health_report.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +""" +Data Health Report for a collection. + +Outputs CSV with: symbol, rows, first_date, last_date, stale (Y/N) +Optionally prints a summary to stdout. 
+""" + +from __future__ import annotations + +import argparse +import csv +from pathlib import Path +from typing import List + +import pandas as pd +from pandas.tseries.offsets import BDay + +from src.cli.unified_cli import load_collection_symbols, resolve_collection_path +from src.core.data_manager import UnifiedDataManager + + +def is_stale(last_date: pd.Timestamp) -> bool: + try: + expected = (pd.Timestamp.today().normalize() - BDay(1)).date() + return last_date.date() < expected + except Exception: + return False + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser(description="Data health report for a collection") + parser.add_argument("collection", help="Collection key or path to JSON") + parser.add_argument("--interval", default="1d") + parser.add_argument("--period", default="") + parser.add_argument("--out", default="artifacts/data_health.csv") + args = parser.parse_args(argv) + + p = ( + resolve_collection_path(args.collection) + if not Path(args.collection).exists() + else Path(args.collection) + ) + symbols: List[str] = load_collection_symbols(p) + dm = UnifiedDataManager() + + rows: List[dict] = [] + for s in symbols: + try: + df = dm.get_data( + s, + start_date="1900-01-01" if args.period == "max" else "2000-01-01", + end_date=pd.Timestamp.today().strftime("%Y-%m-%d"), + interval=args.interval, + use_cache=True, + period=args.period or None, + period_mode=args.period or None, + ) + if df is None or df.empty: + rows.append( + { + "symbol": s, + "rows": 0, + "first_date": "", + "last_date": "", + "stale": "Y", + } + ) + continue + first = df.index[0] + last = df.index[-1] + rows.append( + { + "symbol": s, + "rows": len(df), + "first_date": first.date().isoformat(), + "last_date": last.date().isoformat(), + "stale": "Y" if is_stale(last) else "N", + } + ) + except Exception: + rows.append( + { + "symbol": s, + "rows": 0, + "first_date": "", + "last_date": "", + "stale": "Y", + } + ) + + out_path = Path(args.out) + 
out_path.parent.mkdir(parents=True, exist_ok=True) + with out_path.open("w", newline="") as fh: + writer = csv.DictWriter( + fh, fieldnames=["symbol", "rows", "first_date", "last_date", "stale"] + ) + writer.writeheader() + for r in rows: + writer.writerow(r) + + print(f"Wrote data health report: {out_path}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/debug/memory_profile.py b/scripts/debug/memory_profile.py deleted file mode 100644 index 9a67091..0000000 --- a/scripts/debug/memory_profile.py +++ /dev/null @@ -1,54 +0,0 @@ -"""Memory profiling script for performance analysis.""" - -from __future__ import annotations - -import numpy as np -import pandas as pd -from memory_profiler import profile - - -@profile -def create_large_dataset(): - """Create a large dataset to test memory usage.""" - print("Creating large dataset...") - data = pd.DataFrame( - { - "Open": np.random.randn(100000).cumsum() + 100, - "High": np.random.randn(100000).cumsum() + 102, - "Low": np.random.randn(100000).cumsum() + 98, - "Close": np.random.randn(100000).cumsum() + 101, - "Volume": np.random.randint(1000, 10000, 100000), - } - ) - print(f"Dataset shape: {data.shape}") - return data - - -@profile -def process_data(data): - """Process the dataset with various calculations.""" - print("Processing data...") - data["SMA_20"] = data["Close"].rolling(window=20).mean() - data["SMA_50"] = data["Close"].rolling(window=50).mean() - data["RSI"] = calculate_rsi(data["Close"]) - data["Volatility"] = data["Close"].rolling(window=20).std() - print("Data processing complete") - return data - - -def calculate_rsi(prices, period=14): - """Calculate RSI.""" - delta = prices.diff() - gain = (delta.where(delta > 0, 0)).rolling(window=period).mean() - loss = (-delta.where(delta < 0, 0)).rolling(window=period).mean() - rs = gain / loss - rsi = 100 - (100 / (1 + rs)) - return rsi - - -if __name__ == "__main__": - print("Starting memory profiling...") - data = 
create_large_dataset() - processed_data = process_data(data) - print(f"Final dataset shape: {processed_data.shape}") - print("Memory profiling complete") diff --git a/scripts/init-db.sql b/scripts/init-db.sql deleted file mode 100644 index 765416b..0000000 --- a/scripts/init-db.sql +++ /dev/null @@ -1,133 +0,0 @@ --- Initialize Quant Trading System Database --- This script runs automatically when PostgreSQL container starts - --- Create extensions -CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; -CREATE EXTENSION IF NOT EXISTS "pg_stat_statements"; - --- Create schemas -CREATE SCHEMA IF NOT EXISTS market_data; -CREATE SCHEMA IF NOT EXISTS backtests; -CREATE SCHEMA IF NOT EXISTS portfolios; - --- Market data tables -CREATE TABLE IF NOT EXISTS market_data.price_history ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), - symbol VARCHAR(20) NOT NULL, - timestamp TIMESTAMP WITH TIME ZONE NOT NULL, - open DECIMAL(20,8) NOT NULL, - high DECIMAL(20,8) NOT NULL, - low DECIMAL(20,8) NOT NULL, - close DECIMAL(20,8) NOT NULL, - volume BIGINT, - data_source VARCHAR(50) NOT NULL, - created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), - UNIQUE(symbol, timestamp, data_source) -); - --- Backtest results -CREATE TABLE IF NOT EXISTS backtests.results ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), - name VARCHAR(255) NOT NULL, - symbols TEXT[] NOT NULL, - strategy VARCHAR(100) NOT NULL, - start_date DATE NOT NULL, - end_date DATE NOT NULL, - initial_capital DECIMAL(20,2) NOT NULL, - final_value DECIMAL(20,2) NOT NULL, - total_return DECIMAL(10,4) NOT NULL, - sharpe_ratio DECIMAL(10,4), - sortino_ratio DECIMAL(10,4), - calmar_ratio DECIMAL(10,4), - profit_factor DECIMAL(10,4), - max_drawdown DECIMAL(10,4), - volatility DECIMAL(10,4), - win_rate DECIMAL(10,4), - parameters JSONB, - created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() -); - --- Portfolio configurations -CREATE TABLE IF NOT EXISTS portfolios.configurations ( - id UUID PRIMARY KEY DEFAULT uuid_generate_v4(), - name 
VARCHAR(255) NOT NULL UNIQUE, - symbols TEXT[] NOT NULL, - weights DECIMAL[] DEFAULT NULL, - initial_capital DECIMAL(20,2) NOT NULL, - commission DECIMAL(8,6) DEFAULT 0.001, - slippage DECIMAL(8,6) DEFAULT 0.002, - optimization_metric VARCHAR(50) DEFAULT 'sortino_ratio', - secondary_metrics TEXT[] DEFAULT ARRAY['calmar_ratio', 'sharpe_ratio', 'profit_factor'], - config JSONB, - created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), - updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW() -); - --- Create indexes for performance -CREATE INDEX IF NOT EXISTS idx_price_history_symbol_timestamp ON market_data.price_history(symbol, timestamp DESC); -CREATE INDEX IF NOT EXISTS idx_price_history_data_source ON market_data.price_history(data_source); -CREATE INDEX IF NOT EXISTS idx_backtest_results_created_at ON backtests.results(created_at DESC); -CREATE INDEX IF NOT EXISTS idx_backtest_results_strategy ON backtests.results(strategy); -CREATE INDEX IF NOT EXISTS idx_backtest_results_sortino_ratio ON backtests.results(sortino_ratio DESC); - --- Create update trigger for portfolios -CREATE OR REPLACE FUNCTION update_updated_at_column() -RETURNS TRIGGER AS $$ -BEGIN - NEW.updated_at = NOW(); - RETURN NEW; -END; -$$ language 'plpgsql'; - -CREATE TRIGGER update_portfolios_updated_at - BEFORE UPDATE ON portfolios.configurations - FOR EACH ROW EXECUTE FUNCTION update_updated_at_column(); - --- Insert default crypto portfolio -INSERT INTO portfolios.configurations (name, symbols, initial_capital, optimization_metric, secondary_metrics, config) -VALUES ( - 'Crypto Portfolio', - ARRAY['BTCUSDT', 'ETHUSDT', 'SOLUSDT', 'ADAUSDT', 'DOTUSDT'], - 10000.00, - 'sortino_ratio', - ARRAY['calmar_ratio', 'sharpe_ratio', 'profit_factor'], - '{ - "asset_type": "crypto", - "data_sources": { - "primary": ["bybit", "yahoo_finance"], - "fallback": ["alpha_vantage"] - }, - "risk_profile": "high", - "leverage": 1, - "rebalance_frequency": "weekly" - }'::jsonb -) ON CONFLICT (name) DO NOTHING; - --- Create user 
for application (if not exists) -DO $$ -BEGIN - IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'quantapp') THEN - CREATE USER quantapp WITH PASSWORD 'quantapp_secure_password'; - END IF; -END -$$; - --- Grant permissions -GRANT USAGE ON SCHEMA market_data TO quantapp; -GRANT USAGE ON SCHEMA backtests TO quantapp; -GRANT USAGE ON SCHEMA portfolios TO quantapp; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA market_data TO quantapp; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA backtests TO quantapp; -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA portfolios TO quantapp; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA market_data TO quantapp; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA backtests TO quantapp; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA portfolios TO quantapp; - --- Log completion -DO $$ -BEGIN - RAISE NOTICE 'Quant Trading System database initialized successfully'; - RAISE NOTICE 'Schemas created: market_data, backtests, portfolios'; - RAISE NOTICE 'Default crypto portfolio added'; - RAISE NOTICE 'Primary metric: sortino_ratio (hedge fund standard)'; -END $$; diff --git a/scripts/prefetch_all.py b/scripts/prefetch_all.py new file mode 100644 index 0000000..2298c6a --- /dev/null +++ b/scripts/prefetch_all.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +""" +Prefetch multiple collections in one command. 
+ +Examples: + python scripts/prefetch_all.py bonds commodities --mode recent --interval 1d --recent-days 90 + python scripts/prefetch_all.py --all --mode full --interval 1d +""" + +from __future__ import annotations + +import argparse +from typing import List + +from scripts.prefetch_collection import prefetch as prefetch_one + +DEFAULT_COLLECTIONS = ["bonds", "commodities", "crypto", "forex", "indices"] + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser(description="Prefetch multiple collections") + parser.add_argument("collections", nargs="*") + parser.add_argument("--all", action="store_true") + parser.add_argument("--mode", choices=["full", "recent", "both"], default="recent") + parser.add_argument("--interval", default="1d") + parser.add_argument("--recent-days", type=int, default=90) + args = parser.parse_args(argv) + + collections: List[str] + if args.all or not args.collections: + collections = DEFAULT_COLLECTIONS + else: + collections = list(args.collections) + + for c in collections: + print(f"Prefetching {c} ...") + prefetch_one(c, args.mode, args.interval, args.recent_days) + + print("All prefetches complete.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/prefetch_collection.py b/scripts/prefetch_collection.py new file mode 100644 index 0000000..b361807 --- /dev/null +++ b/scripts/prefetch_collection.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +""" +Prefetch collection market data into the cache. 
+ +Modes: +- full : fetch provider 'max' period for full snapshots (long TTL) +- recent : fetch last N days for recent overlay (short TTL) +- both : full followed by recent + +Examples: + python scripts/prefetch_collection.py bonds --mode full --interval 1d + python scripts/prefetch_collection.py config/collections/bonds.json --mode recent --interval 1d --recent-days 90 + +Cron (daily recent overlay at 01:30): + 30 1 * * * /usr/bin/env bash -lc 'cd /path/to/quant-system && \ + docker compose run --rm quant python scripts/prefetch_collection.py bonds --mode recent --interval 1d --recent-days 90 >/dev/null 2>&1' +""" + +from __future__ import annotations + +import argparse +import datetime as dt +from pathlib import Path +from typing import List + +from src.cli.unified_cli import load_collection_symbols, resolve_collection_path +from src.core.data_manager import UnifiedDataManager + + +def prefetch(collection: str, mode: str, interval: str, recent_days: int) -> None: + p = ( + resolve_collection_path(collection) + if not Path(collection).exists() + else Path(collection) + ) + symbols: List[str] = load_collection_symbols(p) + if not symbols: + print("No symbols found in collection") + return + + dm = UnifiedDataManager() + today = dt.datetime.now(dt.timezone.utc).date().isoformat() + + if mode in ("full", "both"): + print(f"[full] Fetching provider max for {len(symbols)} symbols @ {interval}") + dm.get_batch_data( + symbols, + start_date="1900-01-01", + end_date=today, + interval=interval, + use_cache=False, + period="max", + period_mode="max", + ) + + if mode in ("recent", "both"): + start_recent = ( + dt.datetime.now(dt.timezone.utc).date() + - dt.timedelta(days=int(recent_days)) + ).isoformat() + print( + f"[recent] Fetching {recent_days} days for {len(symbols)} symbols @ {interval}" + ) + dm.get_batch_data( + symbols, + start_date=start_recent, + end_date=today, + interval=interval, + use_cache=False, + ) + + print("Prefetch complete.") + + +def main(argv: 
list[str] | None = None) -> int: + parser = argparse.ArgumentParser(description="Prefetch collection data into cache") + parser.add_argument( + "collection", help="Collection key (e.g., bonds) or path to JSON" + ) + parser.add_argument("--mode", choices=["full", "recent", "both"], default="recent") + parser.add_argument("--interval", default="1d") + parser.add_argument("--recent-days", type=int, default=90) + args = parser.parse_args(argv) + + prefetch(args.collection, args.mode, args.interval, args.recent_days) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/scripts/run_direct_collection.py b/scripts/run_direct_collection.py new file mode 100644 index 0000000..3805fed --- /dev/null +++ b/scripts/run_direct_collection.py @@ -0,0 +1,318 @@ +#!/usr/bin/env python3 +""" +Run direct backtests for a portfolio JSON across multiple intervals using the +backtesting library as the single source of truth. + +This version persists all results to the project database (BestStrategy, +BacktestResult, Trades, etc.) so downstream reports and comparisons use the DB +instead of JSON. For convenience and offline inspection it still writes a +summary JSON to `exports/direct_portfolio_comparison.json`, but that file is +not intended to be a data source. + +Usage: + python3 scripts/run_direct_collection.py \ + --portfolio config/collections/bonds.json \ + --intervals "1m 5m 15m 1h 4h 1d 1wk" \ + [--start-date YYYY-MM-DD] [--end-date YYYY-MM-DD] [--period max|1y|...] + +Requirements: + - The database must be reachable (e.g., via docker-compose). The script will + attempt to create a Run row and then persist each backtest result under that + run_id. At the end it finalizes rankings and upserts BestStrategy. 
+""" + +from __future__ import annotations + +import argparse +import json +import logging +from datetime import datetime +from pathlib import Path + +from src.core.direct_backtest import finalize_persistence_for_run, run_direct_backtest +from src.core.external_strategy_loader import get_strategy_loader +from src.core.strategy import StrategyFactory + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("direct-portfolio") + + +def parse_args(): + p = argparse.ArgumentParser() + p.add_argument("--portfolio", required=True, help="Path to portfolio JSON") + p.add_argument( + "--intervals", + required=True, + help="Space-separated list of intervals (e.g. '1min 5min 15min 1h 4h 1d 1wk')", + ) + p.add_argument("--start-date", help="Optional start date (YYYY-MM-DD)") + p.add_argument("--end-date", help="Optional end date (YYYY-MM-DD)") + p.add_argument( + "--period", + default=None, + help="Optional provider period token (e.g. 'max', '1y'). When set, overrides start/end", + ) + p.add_argument("--initial-capital", type=float, default=10000.0) + return p.parse_args() + + +def main(): + args = parse_args() + portfolio_path = Path(args.portfolio) + if not portfolio_path.exists(): + logger.error("Portfolio file not found: %s", portfolio_path) + return + + with portfolio_path.open() as f: + portfolio_data = json.load(f) + + # Get first portfolio + portfolio_name = list(portfolio_data.keys())[0] + portfolio = portfolio_data[portfolio_name] + symbols = portfolio.get("symbols", []) + if not symbols: + logger.error("No symbols found in portfolio") + return + + # Ensure external strategies loader is initialized. + # Prefer using the host-side quant-strategies path if it exists; otherwise + # fall back to the default loader behavior which prefers the container-mounted + # external_strategies directory. 
+ project_root = Path(__file__).resolve().parent.parent + quant_path = project_root / "quant-strategies" / "algorithms" / "python" + try: + if quant_path.exists(): + # Host-side quant-strategies available (development); point loader there. + get_strategy_loader(str(quant_path)) + else: + # No host mount for quant-strategies in container — let loader pick defaults + # (it will prefer /app/external_strategies if present). + get_strategy_loader() + except Exception: + logger.warning("Could not initialize external strategy loader") + + # Determine strategies to test (prefer all available) + all_strats = StrategyFactory.list_strategies().get("all", []) + strategies = all_strats if all_strats else ["rsi", "macd", "bollinger_bands"] + + intervals = args.intervals.split() + start_date = args.start_date or "1970-01-01" + end_date = args.end_date or datetime.now().strftime("%Y-%m-%d") + period = args.period # if provided, data manager will prefer this over start/end + # Prefer initial capital from portfolio config when present + initial_capital = portfolio.get("initial_capital", args.initial_capital) + + results = {} + + # Prepare a minimal manifest/run so that persistence_context can associate all + # backtests under a single run_id. We keep this local to avoid importing the + # full unified CLI; the DB helper provides a simple create_run_from_manifest. 
+ run_id = None + target_metric = "sortino_ratio" + try: + import hashlib + + from src.database import unified_models as um # type: ignore[import-not-found] + + # Minimal plan for hashing + traceability + plan = { + "action": "direct", + "symbols": symbols, + "strategies": strategies, + "intervals": intervals, + "period_mode": str(period or "max"), + "start": start_date, + "end": end_date, + "initial_capital": float(initial_capital), + "commission": float(portfolio.get("commission", 0.001)), + "metric": target_metric, + } + plan_hash = hashlib.sha256( + json.dumps(plan, sort_keys=True, separators=(",", ":")).encode("utf-8") + ).hexdigest() + + manifest = { + "plan": {**plan, "plan_hash": plan_hash}, + "generated_at": datetime.utcnow().isoformat() + "Z", + } + + # Ensure tables exist (best-effort; safe if already created) + try: + um.create_tables() + except Exception: + pass + + run_obj = um.create_run_from_manifest(manifest) + run_id = getattr(run_obj, "run_id", None) + except Exception: + logger.warning( + "Database persistence unavailable; continuing without DB (JSON will still be written)" + ) + + def _sanitize_jsonable(obj): + """Best-effort conversion of stats to JSON-safe primitives.""" + try: + import math + + import numpy as _np # type: ignore[import-not-found] + import pandas as _pd # type: ignore[import-not-found] + except Exception: + math = None + _np = None + _pd = None + + # Pandas Series/DataFrame + try: + if _pd is not None and isinstance(obj, _pd.Series): + return {k: _sanitize_jsonable(v) for k, v in obj.to_dict().items()} + if _pd is not None and isinstance(obj, _pd.DataFrame): + return obj.to_dict(orient="records") + except Exception: + pass + + # Pandas Timestamp / datetime-like + try: + import datetime as _dt # type: ignore[import-not-found] + + if _pd is not None and isinstance(obj, _pd.Timestamp): + return obj.isoformat() + if isinstance(obj, (_dt.datetime, _dt.date)): + return obj.isoformat() + except Exception: + pass + + # Numpy 
scalars/arrays + if _np is not None and isinstance(obj, _np.generic): + try: + return _sanitize_jsonable(obj.item()) + except Exception: + pass + if _np is not None and isinstance(obj, _np.ndarray): + try: + return [_sanitize_jsonable(v) for v in obj.tolist()] + except Exception: + pass + + # Primitives + if obj is None or isinstance(obj, (str, bool, int)): + return obj + if isinstance(obj, float): + try: + if math and (math.isnan(obj) or math.isinf(obj)): + return None + except Exception: + return None + return obj + + # Collections + if isinstance(obj, dict): + out = {} + for k, v in obj.items(): + try: + out[str(k)] = _sanitize_jsonable(v) + except Exception: + out[str(k)] = None + return out + if isinstance(obj, (list, tuple)): + return [_sanitize_jsonable(v) for v in obj] + + # Fallback + try: + return str(obj) + except Exception: + return None + + total = len(symbols) * len(strategies) * len(intervals) + counter = 0 + + for interval in intervals: + for symbol in symbols: + for strategy in strategies: + counter += 1 + logger.info( + "[%d/%d] Running direct backtest %s %s @ %s", + counter, + total, + symbol, + strategy, + interval, + ) + try: + # If DB is available, pass persistence_context so direct_backtest + # will persist BacktestResult and Trades to the database. 
+ persistence_context = ( + {"run_id": run_id, "target_metric": target_metric} + if run_id + else None + ) + result = run_direct_backtest( + symbol=symbol, + strategy_name=strategy, + start_date=start_date, + end_date=end_date, + timeframe=interval, + initial_capital=initial_capital, + commission=portfolio.get("commission", 0.001), + period=period, + persistence_context=persistence_context, + ) + # Collect native stats from backtesting library + stats = _sanitize_jsonable(result.get("bt_results")) + results.setdefault(symbol, {}).setdefault(interval, []).append( + { + "strategy": strategy, + "stats": stats, + "error": result.get("error"), + } + ) + except Exception as e: + logger.error( + "Direct backtest failed for %s %s %s: %s", + symbol, + strategy, + interval, + e, + ) + results.setdefault(symbol, {}).setdefault(interval, []).append( + {"strategy": strategy, "stats": {}, "error": str(e)} + ) + + # Finalize DB ranks/best strategies once all results are persisted. + try: + if run_id: + finalize_persistence_for_run(run_id, target_metric) + logger.info( + "Finalized DB aggregates for run_id=%s (metric=%s)", + run_id, + target_metric, + ) + except Exception: + logger.exception("Failed to finalize DB aggregates for run_id=%s", run_id) + + output_file = Path("exports") / "direct_portfolio_comparison.json" + output_file.parent.mkdir(parents=True, exist_ok=True) + with output_file.open("w") as f: + json.dump(results, f, indent=2, default=str) + + logger.info("Saved direct portfolio stats to %s", output_file) + + # Optionally generate a DB-backed HTML report for this portfolio using the same reporter + try: + from src.reporting.collection_report import DetailedPortfolioReporter + + portfolio_name = portfolio.get("name") or portfolio_path.stem + reporter = DetailedPortfolioReporter() + report_path = reporter.generate_comprehensive_report( + {"name": portfolio_name, "symbols": symbols}, + start_date=start_date, + end_date=end_date, + strategies=["best"], + 
timeframes=intervals, + ) + logger.info("Generated HTML report (DB-backed) at %s", report_path) + except Exception as e: + logger.warning("Could not generate HTML report: %s", e) + + +if __name__ == "__main__": + main() diff --git a/scripts/run_direct_sample.py b/scripts/run_direct_sample.py new file mode 100644 index 0000000..34f9bce --- /dev/null +++ b/scripts/run_direct_sample.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 +""" +Run direct backtest comparisons for a short sample of crypto assets. +Writes JSON output to exports/comparison_sample_Q3_2025.json +""" + +from __future__ import annotations + +import json +import logging +from datetime import datetime +from pathlib import Path + +from src.core.direct_backtest import run_strategy_comparison +from src.core.external_strategy_loader import get_strategy_loader + +# Ensure external loader points at quant-strategies algorithms python directory +project_root = Path(__file__).resolve().parent.parent +quant_path = project_root / "quant-strategies" / "algorithms" / "python" +get_strategy_loader(str(quant_path)) + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger("direct-sample") + +# Strategies to test: use all external strategies discovered by StrategyFactory (fallback to default set) +from src.core.strategy import StrategyFactory + +_external_strats = StrategyFactory.list_strategies().get("external", []) +STRATEGIES = ( + _external_strats + if _external_strats + else ["BuyAndHold", "rsi", "macd", "bollinger_bands"] +) + +# Sample 10 representative assets (mix of BTC/ETH/large caps/mid/small caps) +SYMBOLS = [ + "BTCUSD", + "ETHUSD", + "SOLUSDT", + "BNBUSDT", + "AVAXUSDT", + "DOGEUSDT", + "SUIUSDT", + "RNDRUSDT", + "AGIXUSDT", + "AAVEUSDT", +] + +# Date range - using a broad crypto range (adjust if your local data differs) +START_DATE = "1970-01-01" +END_DATE = datetime.now().strftime("%Y-%m-%d") +# Comprehensive timeframes to test (user requested): 1m,5m,15m,1h,4h,1d,1wk +TIMEFRAMES = ["1m", 
"5m", "15m", "1h", "4h", "1d", "1wk"] +INITIAL_CAPITAL = 10000.0 + +results = {} +for timeframe in TIMEFRAMES: + logger.info("Running comparisons for timeframe %s", timeframe) + for symbol in SYMBOLS: + logger.info(" Running strategy comparison for %s (%s)", symbol, timeframe) + try: + comp = run_strategy_comparison( + symbol=symbol, + strategies=STRATEGIES, + start_date=START_DATE, + end_date=END_DATE, + timeframe=timeframe, + initial_capital=INITIAL_CAPITAL, + ) + # Store per-symbol per-timeframe + results.setdefault(symbol, {})[timeframe] = comp + logger.info( + " Completed %s %s: total_strategies=%s, successful=%s", + symbol, + timeframe, + comp.get("total_strategies"), + comp.get("successful_strategies"), + ) + except Exception as e: + logger.error(" Failed for %s %s: %s", symbol, timeframe, e) + results.setdefault(symbol, {})[timeframe] = {"error": str(e)} + +# Ensure output dir exists (should already) +output_path = Path("exports/comparison_sample_Q3_2025.json") +with output_path.open("w", encoding="utf-8") as f: + json.dump(results, f, indent=2, default=str) + +print(f"Saved sample comparison results to {output_path}") diff --git a/src/ai/ai_report_generator.py b/src/ai/ai_report_generator.py deleted file mode 100644 index 310ffe1..0000000 --- a/src/ai/ai_report_generator.py +++ /dev/null @@ -1,120 +0,0 @@ -"""AI Report Generator for Investment Recommendations.""" - -from __future__ import annotations - -from datetime import datetime -from pathlib import Path - -from .models import PortfolioRecommendation - - -class AIReportGenerator: - """Generates HTML reports for AI investment recommendations.""" - - def __init__(self): - self.output_dir = Path("exports/ai_reports") - self.output_dir.mkdir(parents=True, exist_ok=True) - - def generate_html_report( - self, recommendation: PortfolioRecommendation, portfolio_name: str - ) -> str: - """Generate HTML report for AI recommendations.""" - - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = 
f"ai_recommendations_{portfolio_name}_{timestamp}.html" - output_path = self.output_dir / filename - - html_content = self._create_html_content(recommendation, portfolio_name) - - output_path.write_text(html_content, encoding="utf-8") - return str(output_path) - - def _create_html_content( - self, recommendation: PortfolioRecommendation, portfolio_name: str - ) -> str: - """Create HTML content for AI recommendations.""" - - asset_rows = "" - for asset in recommendation.asset_recommendations: - confidence_color = ( - "#28a745" - if asset.confidence_score > 0.7 - else "#ffc107" - if asset.confidence_score > 0.5 - else "#dc3545" - ) - - asset_rows += f""" - - {asset.symbol} - {asset.strategy} - {asset.timeframe} - {asset.confidence_score:.3f} - {asset.sortino_ratio:.3f} - {asset.sharpe_ratio:.3f} - {asset.total_return:.2f}% - - {asset.recommendation_type} - - """ - - html_template = f""" - - - - - AI Investment Recommendations: {portfolio_name} - - - -
-
-

AI Investment Recommendations

-

Portfolio: {portfolio_name} • Risk Profile: {recommendation.risk_profile.title()}

-
- -
-
-

Portfolio Overview

-
Total Assets: {recommendation.total_assets}
-
Expected Return: {recommendation.expected_return:.2f}%
-
Confidence: {recommendation.confidence_score:.3f}
-

AI Analysis: {recommendation.reasoning}

-
- -

Asset Recommendations

- - - - - - - - - - - - - - - {asset_rows} - -
SymbolStrategyTimeframeConfidenceSortinoSharpeReturnAction
-
-
- -""" - - return html_template diff --git a/src/ai/investment_recommendations.py b/src/ai/investment_recommendations.py index b756472..52bc3c6 100644 --- a/src/ai/investment_recommendations.py +++ b/src/ai/investment_recommendations.py @@ -14,11 +14,11 @@ import pandas as pd from sqlalchemy.orm import Session -from src.ai.ai_report_generator import AIReportGenerator from src.ai.llm_client import LLMClient from src.ai.models import AssetRecommendation, PortfolioRecommendation from src.database.models import AIRecommendation, BacktestResult, BestStrategy from src.database.models import AssetRecommendation as DbAssetRecommendation +from src.reporting.ai_report_generator import AIReportGenerator class AIInvestmentRecommendations: @@ -227,7 +227,9 @@ def generate_recommendations( # Save to database and exports self._save_to_database(portfolio_rec, quarter, portfolio_name) - self._save_to_exports(recommendations, risk_tolerance, quarter, portfolio_name) + self._save_to_exports( + recommendations, risk_tolerance, quarter, portfolio_name, timeframe + ) return portfolio_rec @@ -454,7 +456,11 @@ def generate_portfolio_recommendations( # Save to markdown exports (skip database save due to model mismatch) self._save_to_exports( - filtered_portfolio.recommendations, risk_tolerance, quarter, portfolio_name + filtered_portfolio.recommendations, + risk_tolerance, + quarter, + portfolio_name, + timeframe, ) # Try to save to database (may fail due to model mismatch) @@ -469,10 +475,21 @@ def generate_portfolio_recommendations( if generate_html: # Generate HTML report report_generator = AIReportGenerator() - html_path = report_generator.generate_portfolio_html_report( + # Determine year/quarter parts from quarter token or now + from datetime import datetime as _dt + + if quarter and "_" in (quarter or ""): + quarter_part, year_part = quarter.split("_") + else: + now = _dt.now() + quarter_part = quarter or f"Q{(now.month - 1) // 3 + 1}" + year_part = str(now.year) + html_path = 
report_generator.generate_html_report( + recommendation=filtered_portfolio, portfolio_name=portfolio_name, - recommendations=filtered_portfolio, - quarter=quarter, + year=year_part, + quarter=quarter_part, + interval=timeframe, ) return filtered_portfolio, html_path @@ -1295,8 +1312,9 @@ def _save_to_exports( risk_tolerance: str, quarter: str, portfolio_name: Optional[str] = None, + interval: Optional[str] = None, ): - """Save recommendations to exports/recommendations folder as markdown following standard naming.""" + """Save recommendations to exports/ai_reco using unified filename convention.""" from datetime import datetime from pathlib import Path @@ -1310,15 +1328,18 @@ def _save_to_exports( year_part = str(current_date.year) # Create organized exports directory - exports_dir = Path("exports/recommendations") / year_part / quarter_part + exports_dir = Path("exports/ai_reco") / year_part / quarter_part exports_dir.mkdir(parents=True, exist_ok=True) - # Generate filename following collection_type_quarter_year pattern - collection_name = portfolio_name or "database" - # Convert to lowercase and replace spaces with underscores for filename - filename_collection = collection_name.lower().replace(" ", "_") + # Build unified filename: _Collection___.md + collection_name = portfolio_name or "All_Collections" + sanitized = ( + collection_name.replace(" ", "_").replace("/", "_").strip("_") + or "All_Collections" + ) + safe_interval = (interval or "multi").replace("/", "-") filename = ( - f"{filename_collection}_ai_recommendations_{quarter_part}_{year_part}.md" + f"{sanitized}_Collection_{year_part}_{quarter_part}_{safe_interval}.md" ) # Generate markdown content @@ -1331,6 +1352,41 @@ def _save_to_exports( with output_path.open("w", encoding="utf-8") as f: f.write(markdown_content) + # Also provide a CSV export for analysts + try: + import pandas as _pd + + rows = [] + for rec in recommendations: + rows.append( + { + "Symbol": rec.symbol, + "Strategy": rec.strategy, + 
"Timeframe": rec.timeframe, + "Allocation_Pct": rec.allocation_percentage, + "Risk_Level": rec.risk_level, + "Confidence": rec.confidence, + "Sortino": rec.sortino_ratio, + "Calmar": rec.calmar_ratio, + "Max_Drawdown_Pct": rec.max_drawdown, + "Sharpe(approx)": rec.sharpe_ratio, + "Total_Return_Pct": rec.total_return, + "Trading_Style": rec.trading_style, + "Risk_Per_Trade_Pct": rec.risk_per_trade, + "Position_Size_Pct": rec.position_size, + "Stop_Loss_Points": rec.stop_loss, + "Take_Profit_Points": rec.take_profit, + } + ) + df = _pd.DataFrame(rows) + csv_filename = filename.replace(".md", ".csv") + df.to_csv(exports_dir / csv_filename, index=False) + self.logger.info( + "AI recommendations CSV saved to %s", exports_dir / csv_filename + ) + except Exception as _e: + self.logger.debug("Could not write AI CSV export: %s", _e) + self.logger.info("AI recommendations saved to %s", output_path) def _generate_markdown_report( diff --git a/src/cli/unified_cli.py b/src/cli/unified_cli.py index 1bdf97e..1fa4d7f 100644 --- a/src/cli/unified_cli.py +++ b/src/cli/unified_cli.py @@ -1,2611 +1,1143 @@ +#!/usr/bin/env python3 """ -Unified CLI - Restructured command-line interface using unified components. -Removes duplication and provides comprehensive functionality. -""" - -from __future__ import annotations - -import argparse -import json -import logging -import time -from dataclasses import asdict -from datetime import datetime, timedelta -from pathlib import Path - -from src.core import ( - PortfolioManager, - UnifiedBacktestEngine, - UnifiedCacheManager, - UnifiedDataManager, -) -from src.core.backtest_engine import BacktestConfig, BacktestResult -from src.core.strategy import StrategyFactory, list_available_strategies - - -def get_earliest_data_date(portfolio_path: str) -> datetime: - """ - Get the earliest data date based on portfolio configuration. 
- - Args: - portfolio_path: Path to portfolio configuration file - - Returns: - Earliest reasonable data date - """ - # Default fallback date - default_date = datetime(2015, 1, 1) - - try: - if not portfolio_path: - return default_date - - portfolio_file = Path(portfolio_path) - if not portfolio_file.exists(): - return default_date - - with portfolio_file.open("r") as f: - portfolio_config = json.load(f) - - # Get the first portfolio config (since file contains nested portfolio) - portfolio_key = list(portfolio_config.keys())[0] - config = portfolio_config[portfolio_key] - - # Check metadata for best_data_coverage - metadata = config.get("metadata", {}) - data_coverage = metadata.get("best_data_coverage", "") - - if data_coverage and "-present" in data_coverage: - year_str = data_coverage.split("-")[0] - try: - year = int(year_str) - return datetime(year, 1, 1) - except ValueError: - pass - - # Asset type specific defaults - asset_type = config.get("asset_type", "") - if asset_type == "crypto": - return datetime(2017, 1, 1) # Crypto data typically starts around 2017 - if asset_type == "stocks": - return datetime(1990, 1, 1) # Stock data goes back further - if asset_type == "forex": - return datetime(2000, 1, 1) # Forex data availability - if asset_type == "commodities": - return datetime(2006, 1, 1) # Commodity data availability - if asset_type == "bonds": - return datetime(2003, 1, 1) # Bond data availability - - except Exception as e: - logging.warning( - "Could not determine earliest data date from portfolio config: %s", e - ) - - return default_date - - -def setup_logging(level: str = "INFO") -> None: - """Setup logging configuration.""" - log_level = getattr(logging, level.upper(), logging.INFO) - logging.basicConfig( - level=log_level, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s" - ) - - -def create_parser() -> argparse.ArgumentParser: - """Create the main argument parser.""" - parser = argparse.ArgumentParser( - description="Unified Quant 
Trading System", - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - - parser.add_argument( - "--log-level", - choices=["DEBUG", "INFO", "WARNING", "ERROR"], - default="INFO", - help="Logging level", - ) - - subparsers = parser.add_subparsers(dest="command", help="Available commands") - - # Data commands - add_data_commands(subparsers) - - # Strategy commands - add_strategy_commands(subparsers) - - # Backtest commands - add_backtest_commands(subparsers) - - # Portfolio commands - add_portfolio_commands(subparsers) - - # Optimization commands - add_optimization_commands(subparsers) - - # Analysis commands - add_analysis_commands(subparsers) - - # Cache commands - add_cache_commands(subparsers) - - # Reports commands - add_reports_commands(subparsers) - - # AI commands - add_ai_commands(subparsers) - - # Validation commands - add_validation_commands(subparsers) - - return parser - - -def add_data_commands(subparsers): - """Add data management commands.""" - data_parser = subparsers.add_parser("data", help="Data management commands") - data_subparsers = data_parser.add_subparsers(dest="data_command") - - # Download command - download_parser = data_subparsers.add_parser( - "download", help="Download market data" - ) - download_parser.add_argument( - "--symbols", nargs="+", required=True, help="Symbols to download" - ) - download_parser.add_argument( - "--start-date", required=True, help="Start date (YYYY-MM-DD)" - ) - download_parser.add_argument( - "--end-date", required=True, help="End date (YYYY-MM-DD)" - ) - download_parser.add_argument("--interval", default="1d", help="Data interval") - download_parser.add_argument( - "--asset-type", - choices=["stocks", "crypto", "forex", "commodities"], - help="Asset type hint", - ) - download_parser.add_argument( - "--futures", action="store_true", help="Download crypto futures data" - ) - download_parser.add_argument( - "--force", action="store_true", help="Force download even if cached" - ) - - # Sources command - 
data_subparsers.add_parser("sources", help="Show available data sources") - - # Symbols command - symbols_parser = data_subparsers.add_parser( - "symbols", help="List available symbols" - ) - symbols_parser.add_argument( - "--asset-type", - choices=["stocks", "crypto", "forex"], - help="Filter by asset type", - ) - symbols_parser.add_argument("--source", help="Specific data source") - - -def add_strategy_commands(subparsers): - """Add strategy management commands.""" - strategy_parser = subparsers.add_parser( - "strategy", help="Strategy management commands" - ) - strategy_subparsers = strategy_parser.add_subparsers(dest="strategy_command") - - # List strategies - list_parser = strategy_subparsers.add_parser( - "list", help="List available strategies" - ) - list_parser.add_argument( - "--type", - choices=["builtin", "external", "all"], - default="all", - help="Filter by strategy type", - ) - - # Strategy info - info_parser = strategy_subparsers.add_parser( - "info", help="Get strategy information" - ) - info_parser.add_argument("name", help="Strategy name") - - # Test strategy - test_parser = strategy_subparsers.add_parser( - "test", help="Test strategy with sample data" - ) - test_parser.add_argument("name", help="Strategy name") - test_parser.add_argument("--symbol", default="AAPL", help="Symbol for testing") - test_parser.add_argument("--start-date", default="2023-01-01", help="Start date") - test_parser.add_argument("--end-date", default="2023-12-31", help="End date") - test_parser.add_argument("--parameters", help="JSON string of strategy parameters") - - -def add_backtest_commands(subparsers): - """Add backtesting commands.""" - backtest_parser = subparsers.add_parser("backtest", help="Backtesting commands") - backtest_subparsers = backtest_parser.add_subparsers(dest="backtest_command") - - # Single backtest - single_parser = backtest_subparsers.add_parser("single", help="Run single backtest") - single_parser.add_argument("--symbol", required=True, 
help="Symbol to backtest") - single_parser.add_argument("--strategy", required=True, help="Strategy to use") - single_parser.add_argument("--start-date", required=True, help="Start date") - single_parser.add_argument("--end-date", required=True, help="End date") - single_parser.add_argument("--interval", default="1d", help="Data interval") - single_parser.add_argument( - "--capital", type=float, default=10000, help="Initial capital" - ) - single_parser.add_argument( - "--commission", type=float, default=0.001, help="Commission rate" - ) - single_parser.add_argument( - "--parameters", help="JSON string of strategy parameters" - ) - single_parser.add_argument( - "--futures", action="store_true", help="Use futures mode" - ) - single_parser.add_argument( - "--no-cache", action="store_true", help="Disable caching" - ) - - # Batch backtest - batch_parser = backtest_subparsers.add_parser("batch", help="Run batch backtests") - batch_parser.add_argument( - "--symbols", nargs="+", required=True, help="Symbols to backtest" - ) - batch_parser.add_argument( - "--strategies", nargs="+", required=True, help="Strategies to use" - ) - batch_parser.add_argument("--start-date", required=True, help="Start date") - batch_parser.add_argument("--end-date", required=True, help="End date") - batch_parser.add_argument("--interval", default="1d", help="Data interval") - batch_parser.add_argument( - "--capital", type=float, default=10000, help="Initial capital" - ) - batch_parser.add_argument( - "--commission", type=float, default=0.001, help="Commission rate" - ) - batch_parser.add_argument( - "--max-workers", type=int, help="Maximum parallel workers" - ) - batch_parser.add_argument( - "--memory-limit", type=float, default=8.0, help="Memory limit in GB" - ) - batch_parser.add_argument("--asset-type", help="Asset type hint") - batch_parser.add_argument("--futures", action="store_true", help="Use futures mode") - batch_parser.add_argument( - "--save-trades", action="store_true", help="Save 
individual trades" - ) - batch_parser.add_argument( - "--save-equity", action="store_true", help="Save equity curves" - ) - batch_parser.add_argument("--output", help="Output file path") - - -def add_portfolio_commands(subparsers): - """Add portfolio management commands.""" - portfolio_parser = subparsers.add_parser( - "portfolio", help="Portfolio management commands" - ) - portfolio_subparsers = portfolio_parser.add_subparsers(dest="portfolio_command") - - # Backtest portfolio - backtest_parser = portfolio_subparsers.add_parser( - "backtest", help="Backtest portfolio" - ) - backtest_parser.add_argument( - "--symbols", nargs="+", required=True, help="Portfolio symbols" - ) - backtest_parser.add_argument("--strategy", required=True, help="Portfolio strategy") - backtest_parser.add_argument("--start-date", required=True, help="Start date") - backtest_parser.add_argument("--end-date", required=True, help="End date") - backtest_parser.add_argument("--weights", help="JSON string of symbol weights") - backtest_parser.add_argument("--interval", default="1d", help="Data interval") - backtest_parser.add_argument( - "--capital", type=float, default=10000, help="Initial capital" - ) - - # Test portfolio with all strategies - test_all_parser = portfolio_subparsers.add_parser( - "test-all", help="Test portfolio with all available strategies and timeframes" - ) - test_all_parser.add_argument( - "--portfolio", required=True, help="JSON file with portfolio definition" - ) - test_all_parser.add_argument( - "--start-date", help="Start date (defaults to earliest available)" - ) - test_all_parser.add_argument("--end-date", help="End date (defaults to today)") - test_all_parser.add_argument( - "--period", - choices=["max", "1y", "2y", "5y", "10y"], - default="max", - help="Time period", - ) - test_all_parser.add_argument( - "--metric", - choices=[ - "profit_factor", - "sharpe_ratio", - "sortino_ratio", - "total_return", - "max_drawdown", - ], - default="sharpe_ratio", - help="Primary 
metric for ranking", - ) - test_all_parser.add_argument( - "--timeframes", - nargs="+", - choices=["1min", "5min", "15min", "30min", "1h", "4h", "1d", "1wk"], - default=["1d"], - help="Timeframes to test (default: 1d)", - ) - test_all_parser.add_argument( - "--test-timeframes", - action="store_true", - help="Test all timeframes to find optimal timeframe per asset", - ) - test_all_parser.add_argument( - "--open-browser", action="store_true", help="Open results in browser" - ) - test_all_parser.add_argument( - "--keep-old-trades", - action="store_true", - help="Keep old trade records instead of overriding them (default: override)", - ) - - # Compare portfolios - compare_parser = portfolio_subparsers.add_parser( - "compare", help="Compare multiple portfolios" - ) - compare_parser.add_argument( - "--portfolios", required=True, help="JSON file with portfolio definitions" - ) - compare_parser.add_argument("--start-date", required=True, help="Start date") - compare_parser.add_argument("--end-date", required=True, help="End date") - compare_parser.add_argument("--output", help="Output file for results") - - # Investment plan - plan_parser = portfolio_subparsers.add_parser( - "plan", help="Generate investment plan" - ) - plan_parser.add_argument( - "--portfolios", required=True, help="JSON file with portfolio results" - ) - plan_parser.add_argument( - "--capital", type=float, required=True, help="Total capital to allocate" - ) - plan_parser.add_argument( - "--risk-tolerance", - choices=["conservative", "moderate", "aggressive"], - default="moderate", - help="Risk tolerance", - ) - plan_parser.add_argument("--output", help="Output file for investment plan") - - -def add_optimization_commands(subparsers): - """Add optimization commands.""" - opt_parser = subparsers.add_parser( - "optimize", help="Strategy optimization commands" - ) - opt_subparsers = opt_parser.add_subparsers(dest="optimize_command") - - # Single optimization - single_parser = 
opt_subparsers.add_parser("single", help="Optimize single strategy") - single_parser.add_argument("--symbol", required=True, help="Symbol to optimize") - single_parser.add_argument("--strategy", required=True, help="Strategy to optimize") - single_parser.add_argument("--start-date", required=True, help="Start date") - single_parser.add_argument("--end-date", required=True, help="End date") - single_parser.add_argument( - "--parameters", required=True, help="JSON file with parameter ranges" - ) - single_parser.add_argument( - "--method", - choices=["genetic", "grid", "bayesian"], - default="genetic", - help="Optimization method", - ) - single_parser.add_argument( - "--metric", default="sharpe_ratio", help="Optimization metric" - ) - single_parser.add_argument( - "--iterations", type=int, default=100, help="Maximum iterations" - ) - single_parser.add_argument( - "--population", - type=int, - default=50, - help="Population size for genetic algorithm", - ) - - # Batch optimization - batch_parser = opt_subparsers.add_parser( - "batch", help="Optimize multiple strategies" - ) - batch_parser.add_argument( - "--symbols", nargs="+", required=True, help="Symbols to optimize" - ) - batch_parser.add_argument( - "--strategies", nargs="+", required=True, help="Strategies to optimize" - ) - batch_parser.add_argument("--start-date", required=True, help="Start date") - batch_parser.add_argument("--end-date", required=True, help="End date") - batch_parser.add_argument( - "--parameters", required=True, help="JSON file with parameter ranges" - ) - batch_parser.add_argument( - "--method", - choices=["genetic", "grid", "bayesian"], - default="genetic", - help="Optimization method", - ) - batch_parser.add_argument( - "--max-workers", type=int, help="Maximum parallel workers" - ) - batch_parser.add_argument("--output", help="Output file for results") - - -def add_analysis_commands(subparsers): - """Add analysis and reporting commands.""" - analysis_parser = subparsers.add_parser( - 
"analyze", help="Analysis and reporting commands" - ) - analysis_subparsers = analysis_parser.add_subparsers(dest="analysis_command") - - # Generate report - report_parser = analysis_subparsers.add_parser( - "report", help="Generate analysis report" - ) - report_parser.add_argument( - "--input", required=True, help="Input JSON file with results" - ) - report_parser.add_argument( - "--type", - choices=["portfolio", "strategy", "optimization"], - required=True, - help="Report type", - ) - report_parser.add_argument("--title", help="Report title") - report_parser.add_argument( - "--format", choices=["html", "json"], default="html", help="Output format" - ) - report_parser.add_argument( - "--output-dir", default="exports/reports", help="Output directory" - ) - report_parser.add_argument( - "--no-charts", action="store_true", help="Disable charts" - ) - - # Compare strategies - compare_parser = analysis_subparsers.add_parser( - "compare", help="Compare strategy performance" - ) - compare_parser.add_argument( - "--results", nargs="+", required=True, help="Result files to compare" - ) - compare_parser.add_argument( - "--metric", default="sharpe_ratio", help="Primary comparison metric" - ) - compare_parser.add_argument("--output", help="Output file") - - -def add_cache_commands(subparsers): - """Add cache management commands.""" - cache_parser = subparsers.add_parser("cache", help="Cache management commands") - cache_subparsers = cache_parser.add_subparsers(dest="cache_command") - - # Cache stats - cache_subparsers.add_parser("stats", help="Show cache statistics") - - # Clear cache - clear_parser = cache_subparsers.add_parser("clear", help="Clear cache") - clear_parser.add_argument( - "--type", - choices=["data", "backtest", "optimization"], - help="Cache type to clear", - ) - clear_parser.add_argument("--symbol", help="Clear cache for specific symbol") - clear_parser.add_argument("--source", help="Clear cache for specific source") - clear_parser.add_argument( - 
"--older-than", type=int, help="Clear items older than N days" - ) - clear_parser.add_argument("--all", action="store_true", help="Clear all cache") - - -def add_reports_commands(subparsers): - """Add report management commands.""" - reports_parser = subparsers.add_parser("reports", help="Report management commands") - reports_subparsers = reports_parser.add_subparsers(dest="reports_command") - - # Organize existing reports - reports_subparsers.add_parser( - "organize", help="Organize existing reports into quarterly structure" - ) - - # List reports - list_parser = reports_subparsers.add_parser("list", help="List quarterly reports") - list_parser.add_argument("--year", type=int, help="Filter by year") - - # Cleanup old reports - cleanup_parser = reports_subparsers.add_parser( - "cleanup", help="Cleanup old reports" - ) - cleanup_parser.add_argument( - "--keep-quarters", - type=int, - default=8, - help="Number of quarters to keep (default: 8)", - ) - - # Get latest report - latest_parser = reports_subparsers.add_parser( - "latest", help="Get latest report for portfolio" - ) - latest_parser.add_argument("portfolio", help="Portfolio name") - - # CSV Export - csv_export_parser = reports_subparsers.add_parser( - "export-csv", help="Export portfolio data to CSV format" - ) - csv_export_parser.add_argument( - "--portfolio", - help="Portfolio configuration file path (optional, used for full format)", - ) - csv_export_parser.add_argument( - "--output", default="portfolio_raw_data.csv", help="Output CSV filename" - ) - csv_export_parser.add_argument( - "--format", - choices=["full", "best-strategies", "quarterly"], - default="full", - help="Export format: full data, best strategies only, or quarterly summary", - ) - csv_export_parser.add_argument( - "--columns", - nargs="+", - help="Custom column selection (use 'available' to see options)", - ) - csv_export_parser.add_argument( - "--quarter", help="Quarter for quarterly export (Q1, Q2, Q3, Q4)" - ) - 
csv_export_parser.add_argument("--year", help="Year for quarterly export (YYYY)") - - # TradingView alerts export - tv_export_parser = reports_subparsers.add_parser( - "export-tradingview", help="Export TradingView alerts from database" - ) - tv_export_parser.add_argument("--quarter", "-q", help="Quarter (Q1, Q2, Q3, Q4)") - tv_export_parser.add_argument("--year", "-y", help="Year (YYYY)") - - tv_export_parser.add_argument( - "--output-dir", - default="exports/tradingview_alerts", - help="Output directory for alert files", - ) - - -def add_ai_commands(subparsers): - """Add AI recommendation commands.""" - ai_parser = subparsers.add_parser( - "ai", help="AI-powered investment recommendations" - ) - ai_subparsers = ai_parser.add_subparsers(dest="ai_command") - - # Generate recommendations - recommend_parser = ai_subparsers.add_parser( - "recommend", help="Generate AI investment recommendations" - ) - recommend_parser.add_argument( - "--risk-tolerance", - "-r", - choices=["conservative", "moderate", "aggressive"], - default="moderate", - help="Risk tolerance level", - ) - recommend_parser.add_argument( - "--max-assets", - "-n", - type=int, - default=10, - help="Maximum number of assets to recommend", - ) - recommend_parser.add_argument( - "--min-confidence", - "-c", - type=float, - default=0.7, - help="Minimum confidence score (0-1)", - ) - recommend_parser.add_argument( - "--quarter", "-q", help="Specific quarter to analyze (e.g., Q3_2025)" - ) - recommend_parser.add_argument("--output", "-o", help="Output file path") - recommend_parser.add_argument( - "--format", - choices=["table", "json", "summary"], - default="table", - help="Output format", - ) - recommend_parser.add_argument( - "--timeframe", - "-t", - default="1h", - help="Trading timeframe (e.g., 5m, 15m, 1h, 4h, 1d). 
Affects trading parameters: <1h=scalping, >=1h=swing trading", - ) - - # Compare assets - compare_parser = ai_subparsers.add_parser("compare", help="Compare multiple assets") - compare_parser.add_argument("symbols", nargs="+", help="Asset symbols to compare") - compare_parser.add_argument("--strategy", "-s", help="Filter by specific strategy") - - # Portfolio-specific recommendations - portfolio_recommend_parser = ai_subparsers.add_parser( - "portfolio_recommend", - help="Generate portfolio-specific AI recommendations with HTML report", - ) - portfolio_recommend_parser.add_argument( - "--portfolio", "-p", required=True, help="Portfolio configuration file path" - ) - portfolio_recommend_parser.add_argument( - "--risk-tolerance", - "-r", - choices=["conservative", "moderate", "aggressive"], - default="moderate", - help="Risk tolerance level", - ) - portfolio_recommend_parser.add_argument( - "--max-assets", - "-n", - type=int, - default=10, - help="Maximum number of assets to recommend", - ) - portfolio_recommend_parser.add_argument( - "--min-confidence", - "-c", - type=float, - default=0.6, - help="Minimum confidence score (0-1)", - ) - portfolio_recommend_parser.add_argument( - "--quarter", "-q", help="Specific quarter to analyze (e.g., Q3_2025)" - ) - portfolio_recommend_parser.add_argument( - "--no-html", action="store_true", help="Skip HTML report generation" - ) - portfolio_recommend_parser.add_argument( - "--timeframe", - "-t", - default="1h", - help="Trading timeframe (e.g., 5m, 15m, 1h, 4h, 1d). 
Affects trading parameters: <1h=scalping, >=1h=swing trading", - ) - - # Explain recommendation - explain_parser = ai_subparsers.add_parser( - "explain", help="Explain specific asset recommendation" - ) - explain_parser.add_argument("symbol", help="Asset symbol") - explain_parser.add_argument("strategy", help="Strategy name") - - -def add_validation_commands(subparsers): - """Add metrics validation commands.""" - validation_parser = subparsers.add_parser( - "validate", help="Validate backtesting metrics against backtesting library" - ) - validation_subparsers = validation_parser.add_subparsers(dest="validation_command") - - # Validate single strategy - single_parser = validation_subparsers.add_parser( - "strategy", help="Validate metrics for a single best strategy" - ) - single_parser.add_argument("symbol", help="Symbol to validate") - single_parser.add_argument("strategy", help="Strategy name") - single_parser.add_argument( - "--timeframe", "-t", default="1d", help="Timeframe (default: 1d)" - ) - single_parser.add_argument( - "--tolerance", - type=float, - default=0.05, - help="Tolerance for differences (default: 0.05 = 5%)", - ) - - # Validate multiple strategies - batch_parser = validation_subparsers.add_parser( - "batch", help="Validate metrics for multiple best strategies" - ) - batch_parser.add_argument( - "--symbols", nargs="*", help="Specific symbols to validate (default: all)" - ) - batch_parser.add_argument( - "--limit", - "-n", - type=int, - default=10, - help="Number of strategies to validate (default: 10)", - ) - batch_parser.add_argument( - "--tolerance", - type=float, - default=0.05, - help="Tolerance for differences (default: 0.05 = 5%)", - ) - batch_parser.add_argument( - "--output", "-o", help="Output file path for detailed report" - ) - - -# Command implementations -def handle_data_command(args): - """Handle data management commands.""" - data_manager = UnifiedDataManager() - - if args.data_command == "download": - handle_data_download(args, 
data_manager) - elif args.data_command == "sources": - handle_data_sources(args, data_manager) - elif args.data_command == "symbols": - handle_data_symbols(args, data_manager) - else: - print("Available data commands: download, sources, symbols") - - -def handle_data_download(args, data_manager: UnifiedDataManager): - """Handle data download command.""" - logger = logging.getLogger(__name__) - logger.info("Downloading data for %s symbols", len(args.symbols)) - - successful = 0 - failed = 0 - - for symbol in args.symbols: - try: - if args.futures: - data = data_manager.get_crypto_futures_data( - symbol, - args.start_date, - args.end_date, - args.interval, - not args.force, - ) - else: - data = data_manager.get_data( - symbol, - args.start_date, - args.end_date, - args.interval, - not args.force, - args.asset_type, - ) - - if data is not None and not data.empty: - successful += 1 - logger.info("✅ %s: %s data points", symbol, len(data)) - else: - failed += 1 - logger.warning("❌ %s: No data", symbol) - - except Exception as e: - failed += 1 - logger.error("❌ %s: %s", symbol, e) - - logger.info("Download complete: %s successful, %s failed", successful, failed) - - -def handle_data_sources(args, data_manager: UnifiedDataManager): - """Handle data sources command.""" - sources = data_manager.get_source_status() - - print("\nAvailable Data Sources:") - print("=" * 50) - - for name, status in sources.items(): - print(f"\n{name.upper()}:") - print(f" Priority: {status['priority']}") - print(f" Rate Limit: {status['rate_limit']}s") - print(f" Batch Support: {status['supports_batch']}") - print(f" Futures Support: {status['supports_futures']}") - print( - f" Asset Types: {', '.join(status['asset_types']) if status['asset_types'] else 'All'}" - ) - print(f" Max Symbols/Request: {status['max_symbols_per_request']}") - - -def handle_data_symbols(args, data_manager: UnifiedDataManager): - """Handle data symbols command.""" - print("\nAvailable Symbols:") - print("=" * 30) - - if 
args.asset_type == "crypto" or not args.asset_type: - try: - crypto_futures = data_manager.get_available_crypto_futures() - if crypto_futures: - print(f"\nCrypto Futures ({len(crypto_futures)} symbols):") - for symbol in crypto_futures[:10]: # Show first 10 - print(f" {symbol}") - if len(crypto_futures) > 10: - print(f" ... and {len(crypto_futures) - 10} more") - except Exception as e: - print(f"Error fetching crypto symbols: {e}") - - print("\nNote: Stock and forex symbols depend on Yahoo Finance availability") - - -def handle_backtest_command(args): - """Handle backtesting commands.""" - if args.backtest_command == "single": - handle_single_backtest(args) - elif args.backtest_command == "batch": - handle_batch_backtest(args) - else: - print("Available backtest commands: single, batch") - - -def handle_single_backtest(args): - """Handle single backtest command.""" - logger = logging.getLogger(__name__) - - # Setup components - data_manager = UnifiedDataManager() - cache_manager = UnifiedCacheManager() - engine = UnifiedBacktestEngine(data_manager, cache_manager) - - # Parse custom parameters - custom_params = None - if args.parameters: - try: - custom_params = json.loads(args.parameters) - except json.JSONDecodeError as e: - logger.error("Invalid parameters JSON: %s", e) - return - - # Create config - config = BacktestConfig( - symbols=[args.symbol], - strategies=[args.strategy], - start_date=args.start_date, - end_date=args.end_date, - interval=args.interval, - initial_capital=args.capital, - commission=args.commission, - use_cache=not args.no_cache, - futures_mode=args.futures, - save_trades=True, # Enable trade history saving - ) - - # Run backtest - logger.info("Running backtest: %s/%s", args.symbol, args.strategy) - start_time = time.time() - - result = engine.run_backtest(args.symbol, args.strategy, config, custom_params) - - duration = time.time() - start_time - - # Display results - if result.error: - logger.error("Backtest failed: %s", result.error) - 
return - - print(f"\nBacktest Results for {args.symbol}/{args.strategy}") - print("=" * 50) - print(f"Duration: {duration:.2f}s") - print(f"Data Points: {result.data_points}") - - metrics = result.metrics - if metrics: - print("\nPerformance Metrics:") - print(f" Total Return: {metrics.get('total_return', 0):.2f}%") - print(f" Sharpe Ratio: {metrics.get('sharpe_ratio', 0):.3f}") - print(f" Max Drawdown: {metrics.get('max_drawdown', 0):.2f}%") - print(f" Win Rate: {metrics.get('win_rate', 0):.1f}%") - print(f" Number of Trades: {metrics.get('trades_count', 0)}") - - # Save to database for consistency with portfolio tests - try: - _save_backtest_to_database( - result, - f"single_{args.symbol}_{args.strategy}", - config, - "sortino_ratio", - args.timeframe, - ) - print(" ✅ Results saved to database") - except Exception as e: - print(f" ⚠️ Database save failed: {e}") - - -def handle_batch_backtest(args): - """Handle batch backtest command.""" - logger = logging.getLogger(__name__) - - # Setup components - data_manager = UnifiedDataManager() - cache_manager = UnifiedCacheManager() - engine = UnifiedBacktestEngine( - data_manager, cache_manager, args.max_workers, args.memory_limit - ) - - # Create config - config = BacktestConfig( - symbols=args.symbols, - strategies=args.strategies, - start_date=args.start_date, - end_date=args.end_date, - interval=args.interval, - initial_capital=args.capital, - commission=args.commission, - use_cache=True, - save_trades=args.save_trades, - save_equity_curve=args.save_equity, - memory_limit_gb=args.memory_limit, - max_workers=args.max_workers, - asset_type=args.asset_type, - futures_mode=args.futures, - ) - - # Run batch backtests - logger.info( - "Running batch backtests: %s symbols, %s strategies", - len(args.symbols), - len(args.strategies), - ) - - results = engine.run_batch_backtests(config) - - # Display summary - successful = [r for r in results if not r.error] - failed = [r for r in results if r.error] - - print("\nBatch 
Backtest Summary") - print("=" * 30) - print(f"Total: {len(results)}") - print(f"Successful: {len(successful)}") - print(f"Failed: {len(failed)}") - - if successful: - returns = [r.metrics.get("total_return", 0) for r in successful] - print("\nPerformance Summary:") - print(f" Average Return: {sum(returns) / len(returns):.2f}%") - print(f" Best Return: {max(returns):.2f}%") - print(f" Worst Return: {min(returns):.2f}%") - - # Top performers - top_performers = sorted( - successful, key=lambda x: x.metrics.get("total_return", 0), reverse=True - )[:5] - print("\nTop 5 Performers:") - for i, result in enumerate(top_performers): - print( - f" {i + 1}. {result.symbol}/{result.strategy}: {result.metrics.get('total_return', 0):.2f}%" - ) - - # Save results if output specified - if args.output: - output_data = [asdict(result) for result in results] - with Path(args.output).open("w") as f: - json.dump(output_data, f, indent=2, default=str) - logger.info("Results saved to %s", args.output) - - -def handle_portfolio_command(args): - """Handle portfolio management commands.""" - if args.portfolio_command == "backtest": - handle_portfolio_backtest(args) - elif args.portfolio_command == "test-all": - handle_portfolio_test_all(args) - elif args.portfolio_command == "compare": - handle_portfolio_compare(args) - elif args.portfolio_command == "plan": - handle_investment_plan(args) - else: - print("Available portfolio commands: backtest, test-all, compare, plan") - - -def handle_portfolio_test_all(args): - """Handle testing portfolio with all strategies.""" - import webbrowser - from datetime import datetime - - from src.cli.direct_backtest_cli import save_direct_backtest_to_database - from src.core.direct_backtest import run_direct_backtest - from src.reporting.detailed_portfolio_report import DetailedPortfolioReporter - - logger = logging.getLogger(__name__) - - # Load portfolio definition - try: - with Path(args.portfolio).open() as f: - portfolio_data = json.load(f) - - # Get the 
first (and likely only) portfolio from the file - portfolio_name = next(iter(portfolio_data.keys())) - portfolio_config = portfolio_data[portfolio_name] - except Exception as e: - logger.error("Error loading portfolio: %s", e) - return - - # Calculate date range based on period - end_date = ( - datetime.strptime(args.end_date, "%Y-%m-%d") - if hasattr(args, "end_date") and args.end_date - else datetime.now() - ) - - if args.period == "max": - start_date = get_earliest_data_date(args.portfolio) - elif args.period == "10y": - start_date = end_date - timedelta(days=365 * 10) - elif args.period == "5y": - start_date = end_date - timedelta(days=365 * 5) - elif args.period == "2y": - start_date = end_date - timedelta(days=365 * 2) - else: # default to max - start_date = get_earliest_data_date(args.portfolio) - - # Use provided dates if available - if hasattr(args, "start_date") and args.start_date: - start_date = datetime.strptime(args.start_date, "%Y-%m-%d") - if hasattr(args, "end_date") and args.end_date: - end_date = datetime.strptime(args.end_date, "%Y-%m-%d") - - # Use specified strategies or fallback - all_strategies = ["BuyAndHold", "rsi", "macd", "bollinger_bands"] - print(f"🔍 Testing with strategies: {all_strategies}") - - # Determine timeframes to test - if hasattr(args, "timeframes") and args.timeframes: - timeframes_to_test = args.timeframes - else: - timeframes_to_test = ["1d", "1wk"] - - total_combinations = ( - len(portfolio_config["symbols"]) * len(all_strategies) * len(timeframes_to_test) - ) - - print(f"\n🔍 Testing Portfolio: {portfolio_config['name']}") - print( - f"📅 Period: {start_date.strftime('%Y-%m-%d')} to {end_date.strftime('%Y-%m-%d')}" - ) - symbols_display = ", ".join(portfolio_config["symbols"][:5]) - if len(portfolio_config["symbols"]) > 5: - symbols_display += "..." 
- print(f"📊 Symbols: {symbols_display}") - print(f"⚙️ Strategies: {', '.join(all_strategies)}") - print(f"⏰ Timeframes: {', '.join(timeframes_to_test)}") - print(f"🔢 Total Combinations: {total_combinations:,}") - print(f"📈 Primary Metric: {args.metric}") - print("=" * 70) - - print("\n📊 Running direct backtests...") - - # Run actual backtests for each symbol-strategy-timeframe combination - total_tests = ( - len(portfolio_config["symbols"]) * len(all_strategies) * len(timeframes_to_test) - ) - current_test = 0 - - for symbol in portfolio_config["symbols"]: - for strategy in all_strategies: - for timeframe in timeframes_to_test: - current_test += 1 - print( - f" 🔄 Testing {symbol} with {strategy} on {timeframe} ({current_test}/{total_tests})" - ) - - try: - # Use direct backtest library - result_dict = run_direct_backtest( - symbol=symbol, - strategy_name=strategy, - start_date=start_date.strftime("%Y-%m-%d"), - end_date=end_date.strftime("%Y-%m-%d"), - timeframe=timeframe, - initial_capital=portfolio_config.get("initial_capital", 10000), - commission=portfolio_config.get("commission", 0.001), - ) - - # Save to database using new direct approach - if not result_dict.get("error"): - save_direct_backtest_to_database(result_dict, args.metric) - - metrics = result_dict["metrics"] - print( - f" ✅ Return: {metrics.get('total_return', 0):.2f}%, Sharpe: {metrics.get('sharpe_ratio', 0):.3f}" - ) - else: - print(f" ❌ Error: {result_dict['error']}") - - except Exception as e: - print( - f" ❌ Error testing {symbol} with {strategy} on {timeframe}: {e}" - ) - - # Generate detailed report with actual results - print("\n📊 Generating comprehensive HTML report...") - reporter = DetailedPortfolioReporter() - report_path = reporter.generate_comprehensive_report( - portfolio_config=portfolio_config, - start_date=start_date.strftime("%Y-%m-%d"), - end_date=end_date.strftime("%Y-%m-%d"), - strategies=all_strategies, - timeframes=timeframes_to_test, - ) - - print(f"\n📱 Comprehensive report 
generated: {report_path}") - - # Quick summary for CLI using database best strategies - print(f"\n📊 Quick Summary by {args.metric.replace('_', ' ').title()}:") - print("-" * 50) - - # Get best strategy performance from database - from src.database import get_db_session - from src.database.models import BestStrategy - - session = get_db_session() - try: - best_strategies = session.query(BestStrategy).all() - - if not best_strategies: - print("No strategies found in database") - else: - # Calculate average metric scores for each strategy - strategy_scores = {} - for best_strategy in best_strategies: - strategy_name = best_strategy.strategy - metric_value = getattr(best_strategy, args.metric, 0) - - if strategy_name not in strategy_scores: - strategy_scores[strategy_name] = [] - strategy_scores[strategy_name].append( - float(metric_value) if metric_value is not None else 0.0 - ) - - # Calculate averages - strategy_results = {} - for strategy_name, scores in strategy_scores.items(): - strategy_results[strategy_name] = ( - sum(scores) / len(scores) if scores else 0.0 - ) - - # Sort by metric (ascending for drawdown, descending for others) - reverse_sort = args.metric != "max_drawdown" - sorted_strategies = sorted( - strategy_results.items(), key=lambda x: x[1], reverse=reverse_sort - ) - - for i, (strategy, score) in enumerate(sorted_strategies, 1): - if args.metric == "sharpe_ratio": - print(f" {i}. {strategy:15} | Sharpe: {score:.3f}") - elif args.metric == "total_return": - print(f" {i}. {strategy:15} | Return: {score:.1f}%") - elif args.metric == "profit_factor": - print(f" {i}. {strategy:15} | Profit Factor: {score:.2f}") - elif args.metric == "sortino_ratio": - print(f" {i}. {strategy:15} | Sortino: {score:.3f}") - elif args.metric == "max_drawdown": - print(f" {i}. {strategy:15} | Max Drawdown: {score:.1f}%") - else: - print( - f" {i}. 
{strategy:15} | {args.metric.replace('_', ' ').title()}: {score:.3f}" - ) - - finally: - session.close() - - # Get the best overall strategy from the database - try: - best_strategy_name = _get_best_overall_strategy_from_db(args.metric) - print(f"\n🏆 Best Overall Strategy: {best_strategy_name}") - except Exception as e: - logger.warning("Could not determine best strategy: %s", e) - print("\n🏆 Best Overall Strategy: Check the detailed report") - - print( - "\n📊 Each asset analyzed with detailed KPIs, order history, and equity curves" - ) - print("💾 Report size optimized with compression") - - if hasattr(args, "open_browser") and args.open_browser: - import os - - webbrowser.open(f"file://{os.path.abspath(report_path)}") - print("📱 Detailed report opened in browser") - - -def handle_portfolio_backtest(args): - """Handle portfolio backtest command.""" - logger = logging.getLogger(__name__) - - # Parse weights - weights = None - if args.weights: - try: - weights = json.loads(args.weights) - except json.JSONDecodeError as e: - logger.error("Invalid weights JSON: %s", e) - return - - # Setup components - data_manager = UnifiedDataManager() - cache_manager = UnifiedCacheManager() - engine = UnifiedBacktestEngine(data_manager, cache_manager) - - # Create config - config = BacktestConfig( - symbols=args.symbols, - strategies=[args.strategy], - start_date=args.start_date, - end_date=args.end_date, - interval=args.interval, - initial_capital=args.capital, - use_cache=True, - ) - - # Run portfolio backtest - logger.info("Running portfolio backtest: %s symbols", len(args.symbols)) - - result = engine.run_portfolio_backtest(config, weights) - - # Display results - if result.error: - logger.error("Portfolio backtest failed: %s", result.error) - return - - # Save to database - try: - _save_backtest_to_database(result, "PORTFOLIO", config, "sortino_ratio", "1d") - logger.info("Backtest results saved to database") - except Exception as e: - logger.warning("Failed to save to 
database: %s", e) - - print("\nPortfolio Backtest Results") - print("=" * 30) - - metrics = result.metrics - if metrics: - print(f"Total Return: {metrics.get('total_return', 0):.2f}%") - print(f"Sharpe Ratio: {metrics.get('sharpe_ratio', 0):.3f}") - print(f"Max Drawdown: {metrics.get('max_drawdown', 0):.2f}%") - print(f"Volatility: {metrics.get('volatility', 0):.2f}%") - - -def _save_backtest_to_database( - backtest_result, - name_prefix: str = "", - config=None, - metric: str = "sortino_ratio", - timeframe: str = "1d", -): - """Save backtest result to PostgreSQL database and update best strategies.""" - from datetime import datetime - - from src.database import get_db_session - from src.database.models import BacktestResult as DBBacktestResult - - session = get_db_session() - - try: - # Save to legacy results table (for backward compatibility) - db_result = DBBacktestResult( - name=f"{name_prefix}_{backtest_result.strategy}_{backtest_result.symbol}", - symbols=[backtest_result.symbol] - if backtest_result.symbol != "PORTFOLIO" - else ["PORTFOLIO"], - strategy=backtest_result.strategy, - start_date=datetime.strptime(config.start_date, "%Y-%m-%d").date() - if config - else None, - end_date=datetime.strptime(config.end_date, "%Y-%m-%d").date() - if config - else None, - initial_capital=float(backtest_result.config.initial_capital), - final_value=float( - backtest_result.metrics.get( - "final_capital", backtest_result.config.initial_capital - ) - ), - total_return=float(backtest_result.metrics.get("total_return", 0)), - sortino_ratio=float(backtest_result.metrics.get("sortino_ratio", 0)), - calmar_ratio=float(backtest_result.metrics.get("calmar_ratio", 0)), - sharpe_ratio=float(backtest_result.metrics.get("sharpe_ratio", 0)), - profit_factor=float(backtest_result.metrics.get("profit_factor", 0)), - max_drawdown=float(backtest_result.metrics.get("max_drawdown", 0)), - volatility=float(backtest_result.metrics.get("volatility", 0)), - 
win_rate=float(backtest_result.metrics.get("win_rate", 0)), - trades_count=int(backtest_result.metrics.get("trades_count", 0)), - alpha=float(backtest_result.metrics.get("alpha", 0)), - beta=float(backtest_result.metrics.get("beta", 1)), - expectancy=float(backtest_result.metrics.get("expectancy", 0)), - average_win=float(backtest_result.metrics.get("average_win", 0)), - average_loss=float(backtest_result.metrics.get("average_loss", 0)), - total_fees=float(backtest_result.metrics.get("total_fees", 0)), - portfolio_turnover=float( - backtest_result.metrics.get("portfolio_turnover", 0) - ), - strategy_capacity=float( - backtest_result.metrics.get("strategy_capacity", 1000000) - ), - parameters=backtest_result.parameters or {}, - ) - - session.add(db_result) - session.flush() # Flush to get the ID - - # Save individual trades if available - if backtest_result.trades is not None and not backtest_result.trades.empty: - from src.database.models import Trade - - # Clean up old trades if override is enabled (default behavior) - if getattr(backtest_result.config, "override_old_trades", True): - old_backtest_results = ( - session.query(DBBacktestResult) - .filter( - DBBacktestResult.symbols.any(backtest_result.symbol), - DBBacktestResult.strategy == backtest_result.strategy, - DBBacktestResult.id - != db_result.id, # Don't delete the current record - ) - .all() - ) - - for old_result in old_backtest_results: - # Delete associated trades first - old_trades_deleted = ( - session.query(Trade) - .filter( - Trade.backtest_result_id == old_result.id, - Trade.symbol == backtest_result.symbol, - ) - .delete() - ) - - # Then delete the old backtest result - session.delete(old_result) - - if old_trades_deleted > 0: - print( - f"🧹 Cleaned up {old_trades_deleted} old trades and 1 old backtest result for {backtest_result.symbol}/{backtest_result.strategy}" - ) - - current_equity = float(backtest_result.config.initial_capital) - current_holdings = 0.0 - - for _, trade_row in 
backtest_result.trades.iterrows(): - # Backtesting library returns complete round-trip trades - # Each row has EntryTime, ExitTime, EntryPrice, ExitPrice - - entry_value = float(trade_row["Size"]) * float(trade_row["EntryPrice"]) - exit_value = float(trade_row["Size"]) * float(trade_row["ExitPrice"]) - - # Create ENTRY trade record - entry_trade = Trade( - backtest_result_id=db_result.id, - symbol=backtest_result.symbol, - strategy=backtest_result.strategy, - timeframe=getattr(backtest_result.config, "timeframe", timeframe), - trade_datetime=trade_row["EntryTime"], - side="BUY", - size=float(trade_row["Size"]), - price=float(trade_row["EntryPrice"]), - equity_before=current_equity, - equity_after=current_equity - entry_value, - ) - session.add(entry_trade) - - current_equity -= entry_value - current_holdings += float(trade_row["Size"]) - - # Create EXIT trade record - exit_trade = Trade( - backtest_result_id=db_result.id, - symbol=backtest_result.symbol, - strategy=backtest_result.strategy, - timeframe=getattr(backtest_result.config, "timeframe", timeframe), - trade_datetime=trade_row["ExitTime"], - side="SELL", - size=float(trade_row["Size"]), - price=float(trade_row["ExitPrice"]), - equity_before=current_equity, - equity_after=current_equity + exit_value, - ) - session.add(exit_trade) - - current_equity += exit_value - current_holdings -= float(trade_row["Size"]) - - # Recalculate correct final_value and total_return from actual trade data - logger = logging.getLogger(__name__) - logger.debug( - "Checking trade correction for %s/%s", - backtest_result.symbol, - backtest_result.strategy, - ) - logger.debug( - "Has trades attr: %s, Trade count: %s", - hasattr(backtest_result, "trades"), - len(backtest_result.trades) - if hasattr(backtest_result, "trades") - else "N/A", - ) - if hasattr(backtest_result, "trades") and len(backtest_result.trades) > 0: - # Get the final equity from the last trade calculation - final_trade_equity = current_equity + ( - current_holdings 
* float(backtest_result.trades.iloc[-1]["price"]) - ) - actual_total_return = ( - (final_trade_equity - float(backtest_result.config.initial_capital)) - / float(backtest_result.config.initial_capital) - ) * 100 - - # Recalculate advanced metrics from actual trade PnL data using pure Python - trade_pnls = [] - for _, trade_row in backtest_result.trades.iterrows(): - if "pnl" in trade_row and trade_row["pnl"] != 0: - trade_pnls.append(float(trade_row["pnl"])) - - if trade_pnls: - winning_trades = [pnl for pnl in trade_pnls if pnl > 0] - losing_trades = [pnl for pnl in trade_pnls if pnl < 0] - initial_capital = float(backtest_result.config.initial_capital) - - # Calculate percentages of initial capital using pure Python - actual_avg_win = ( - (sum(winning_trades) / len(winning_trades) / initial_capital * 100) - if winning_trades - else 0 - ) - actual_avg_loss = ( - ( - sum(abs(loss) for loss in losing_trades) - / len(losing_trades) - / initial_capital - * 100 - ) - if losing_trades - else 0 - ) - actual_win_rate = ( - (len(winning_trades) / len(trade_pnls) * 100) if trade_pnls else 0 - ) - actual_expectancy = ( - (sum(trade_pnls) / len(trade_pnls) / initial_capital * 100) - if trade_pnls - else 0 - ) - else: - actual_avg_win = actual_avg_loss = actual_win_rate = ( - actual_expectancy - ) = 0 - - # Update the database record with correct values (ensure all are Python native types) - db_result.final_value = float(final_trade_equity) - db_result.total_return = float(actual_total_return) - db_result.trades_count = len(backtest_result.trades) - - # For advanced metrics, ensure complete type conversion - import builtins - - db_result.average_win = builtins.float(actual_avg_win) - db_result.average_loss = builtins.float(actual_avg_loss) - db_result.win_rate = builtins.float(actual_win_rate) - db_result.expectancy = builtins.float(actual_expectancy) - - # Also update the backtest_result metrics so best strategy update uses correct values - 
backtest_result.metrics["final_capital"] = final_trade_equity - backtest_result.metrics["total_return"] = actual_total_return - backtest_result.metrics["trades_count"] = len(backtest_result.trades) - backtest_result.metrics["average_win"] = actual_avg_win - backtest_result.metrics["average_loss"] = actual_avg_loss - backtest_result.metrics["win_rate"] = actual_win_rate - backtest_result.metrics["expectancy"] = actual_expectancy - - logger.debug( - "Corrected %s/%s - Final: $%.2f, Return: %.2f%%, Trades: %d", - backtest_result.symbol, - backtest_result.strategy, - final_trade_equity, - actual_total_return, - len(backtest_result.trades), - ) - logger.debug( - "Advanced metrics - avg_win: %.2f%%, avg_loss: %.2f%%, win_rate: %.1f%%", - actual_avg_win, - actual_avg_loss, - actual_win_rate, - ) - - # Update best_strategies table if this is a better result (after correction) - if backtest_result.symbol != "PORTFOLIO": - run_id = f"{name_prefix}_{datetime.now().strftime('%Y%m%d_%H%M%S')}" - _update_best_strategy( - session, - backtest_result, - run_id, - getattr(backtest_result.config, "timeframe", timeframe), - db_result, - metric, - ) - - session.commit() - - except Exception as e: - session.rollback() - raise e - finally: - session.close() - - -def _get_best_overall_strategy_from_db(metric: str = "sortino_ratio") -> str: - """Get the best overall strategy from the database based on the specified metric.""" - from src.database.db_connection import get_db_session - from src.database.models import BestStrategy +Unified CLI entrypoint: src.cli.unified_cli - session = get_db_session() +This module implements the `collection` subcommand which builds a deterministic +plan (plan_hash), writes a manifest, supports --dry-run, and delegates work to +the project's backtest engine and DB/persistence layers if available. 
- try: - # Get the strategy with the highest average score for the metric - best_strategies = session.query(BestStrategy).all() - - if not best_strategies: - return "No strategies found" - - # Calculate average metric scores for each strategy - strategy_scores = {} - for best_strategy in best_strategies: - strategy_name = best_strategy.strategy - metric_value = getattr(best_strategy, metric, 0) - - if strategy_name not in strategy_scores: - strategy_scores[strategy_name] = [] - strategy_scores[strategy_name].append( - float(metric_value) if metric_value is not None else 0.0 - ) +This is intentionally conservative: it validates inputs, expands strategies and +intervals where possible, and provides clear hooks for the engine and DB code. +All optional integrations are guarded to avoid import-time failures. +""" - # Calculate averages and find the best - strategy_averages = {} - for strategy_name, scores in strategy_scores.items(): - strategy_averages[strategy_name] = ( - sum(scores) / len(scores) if scores else 0.0 - ) +from __future__ import annotations - if not strategy_averages: - return "No valid strategies found" +import argparse +import hashlib +import json +import logging +import shutil +import sys +from datetime import datetime +from pathlib import Path +from typing import Any, Dict, List, Optional, Sequence - # Sort by metric (ascending for drawdown, descending for others) - reverse_sort = metric != "max_drawdown" - best_strategy = ( - max(strategy_averages.items(), key=lambda x: x[1]) - if reverse_sort - else min(strategy_averages.items(), key=lambda x: x[1]) - ) +# Constants +DEFAULT_METRIC = "sortino_ratio" +SUPPORTED_INTERVALS = ["1m", "5m", "15m", "1h", "1d", "1wk", "1mo", "3mo"] +INTRADAY_MAX_DAYS = 60 +ONE_MINUTE_MAX_DAYS = 7 - return best_strategy[0] - - except Exception as e: - import logging - - logger = logging.getLogger(__name__) - logger.error("Error getting best strategy from database: %s", e) - return "Error retrieving best strategy" - 
finally: - session.close() - - -def _update_best_strategy( - session, - backtest_result, - run_id: str, - timeframe: str, - db_result, - metric: str = "sortino_ratio", -): - """Update best_strategies table if this result is better than existing.""" - from src.database.models import BestStrategy - - # Check if there's an existing best strategy for this symbol/timeframe - existing = ( - session.query(BestStrategy) - .filter_by(symbol=backtest_result.symbol, timeframe=timeframe) - .first() - ) +log = logging.getLogger("unified_cli") - current_metric_value = float(backtest_result.metrics.get(metric, 0)) - current_sortino = float(backtest_result.metrics.get("sortino_ratio", 0)) - # Save the best strategy per asset based on CLI-specified metric - # For max_drawdown, lower is better; for all others, higher is better - # Prefer strategies with actual trades over strategies with no trades - is_better = False - current_trades_count = int(backtest_result.metrics.get("trades_count", 0)) +# Install a global excepthook that will log uncaught exceptions with a full traceback. +# This is useful when running inside Docker where stderr/telnet output may be suppressed. +def _unified_excepthook(exc_type, exc_value, tb): + import traceback as _traceback - if not existing: - is_better = True + try: + log.exception("Uncaught exception", exc_info=(exc_type, exc_value, tb)) + except Exception: + # If logging fails for any reason, still print the traceback to stderr. 
+ _traceback.print_exception(exc_type, exc_value, tb) else: - existing_metric_value = getattr(existing, metric, 0) or 0 - existing_trades_count = existing.trades_count or 0 - - # Prefer strategies with actual trades - if current_trades_count > 0 and existing_trades_count == 0: - is_better = True - elif current_trades_count == 0 and existing_trades_count > 0: - is_better = False - elif current_trades_count == 0 and existing_trades_count == 0: - # Both have no trades - skip this strategy entirely - is_better = False - else: - # Both have trades - compare by metric - # Special handling: Real metrics (non-zero) should always replace fake zero metrics - if existing_metric_value == 0.0 and current_metric_value != 0.0: - # Real metric replacing fake zero metric - always better - is_better = True - elif metric == "max_drawdown": - is_better = current_metric_value < existing_metric_value - else: - is_better = current_metric_value > existing_metric_value - - if is_better: - # Calculate risk score (normalized combination of drawdown and volatility) - max_dd = float(backtest_result.metrics.get("max_drawdown", 0)) - volatility = float(backtest_result.metrics.get("volatility", 0)) - risk_score = (max_dd + volatility) / 2 # Simple risk score + _traceback.print_exception(exc_type, exc_value, tb) + + +import sys as _sys + +_sys.excepthook = _unified_excepthook + + +def _setup_logging(level: str) -> None: + numeric = getattr(logging, level.upper(), logging.INFO) + logging.basicConfig(level=numeric, format="%(asctime)s %(levelname)s %(message)s") + + +def resolve_collection_path(collection_arg: str) -> Path: + p = Path(collection_arg) + if p.exists(): + return p.resolve() + # try config/collections/.json + base = Path("config") / "collections" + # Aliases for curated defaults + alias_map = { + # Curated defaults + "bonds": "bonds_core", + "commodities": "commodities_core", + "crypto": "crypto_liquid", + "forex": "forex_majors", + "indices": "indices_global_core", + # Convenience 
aliases + "tech_growth": "stocks_us_growth_core", + "us_mega": "stocks_us_mega_core", + "value": "stocks_us_value_core", + "quality": "stocks_us_quality_core", + "minvol": "stocks_us_minvol_core", + "global_factors": "stocks_global_factor_core", + } + key = alias_map.get(collection_arg, collection_arg) + candidates = [ + base / f"{key}.json", + base / "default" / f"{key}.json", + base / "custom" / f"{key}.json", + ] + for alt in candidates: + if alt.exists(): + return alt.resolve() + raise FileNotFoundError(f"Collection file not found: {collection_arg}") - # Calculate trading parameters based on timeframe - risk_per_trade, stop_loss, take_profit = _calculate_trading_parameters_for_db( - timeframe - ) - if existing: - # Update existing record - existing.best_strategy = backtest_result.strategy - existing.sortino_ratio = current_sortino - existing.sharpe_ratio = float( - backtest_result.metrics.get("sharpe_ratio", 0) - ) - existing.calmar_ratio = float( - backtest_result.metrics.get("calmar_ratio", 0) - ) - existing.profit_factor = float( - backtest_result.metrics.get("profit_factor", 0) - ) - existing.total_return = float( - backtest_result.metrics.get("total_return", 0) - ) - existing.max_drawdown = max_dd - existing.volatility = volatility - existing.win_rate = float(backtest_result.metrics.get("win_rate", 0)) - existing.trades_count = int(backtest_result.metrics.get("trades_count", 0)) - existing.alpha = float(backtest_result.metrics.get("alpha", 0)) - existing.beta = float(backtest_result.metrics.get("beta", 1)) - existing.expectancy = float(backtest_result.metrics.get("expectancy", 0)) - existing.average_win = float(backtest_result.metrics.get("average_win", 0)) - existing.average_loss = float( - backtest_result.metrics.get("average_loss", 0) - ) - existing.total_fees = float(backtest_result.metrics.get("total_fees", 0)) - existing.portfolio_turnover = float( - backtest_result.metrics.get("portfolio_turnover", 0) - ) - existing.strategy_capacity = float( - 
backtest_result.metrics.get("strategy_capacity", 1000000) - ) - existing.risk_score = risk_score - existing.risk_per_trade = risk_per_trade - existing.stop_loss_pct = stop_loss - existing.take_profit_pct = take_profit - existing.best_parameters = backtest_result.parameters or {} - existing.backtest_run_id = run_id - else: - # Create new record - best_strategy = BestStrategy( - symbol=backtest_result.symbol, - timeframe=timeframe, - strategy=backtest_result.strategy, - sortino_ratio=current_sortino, - calmar_ratio=float(backtest_result.metrics.get("calmar_ratio", 0)), - sharpe_ratio=float(backtest_result.metrics.get("sharpe_ratio", 0)), - total_return=float(backtest_result.metrics.get("total_return", 0)), - max_drawdown=max_dd, - backtest_result_id=db_result.id, - ) - session.add(best_strategy) +def compute_plan_hash(plan: Dict[str, Any]) -> str: + # Deterministic serialization: sort keys + payload = json.dumps( + plan, sort_keys=True, separators=(",", ":"), ensure_ascii=False + ) + return hashlib.sha256(payload.encode("utf-8")).hexdigest() -def _calculate_trading_parameters_for_db(timeframe: str) -> tuple[float, float, float]: - """Calculate trading parameters for database storage.""" - if timeframe in ["1m", "5m", "15m", "30m", "1h"]: # Scalping - return 0.01, 0.005, 0.01 # 1% risk, 0.5% SL, 1% TP - # Swing trading - return 0.02, 0.02, 0.04 # 2% risk, 2% SL, 4% TP +def load_collection_symbols(collection_path: Path) -> List[str]: + """ + Load symbols from a collection JSON. + + Supported formats: + - Plain list: ["AAPL", "MSFT", ...] + - Dict with top-level "symbols" (or "assets"/"symbols_list"): + {"symbols": ["AAPL", ...], ...} + - Named collection object (common in config/collections/*.json): + {"bonds": {"symbols": [...], "name": "...", ...}} + - Dict of multiple named collections: returns symbols for the first matching + collection that contains a 'symbols' list (best-effort). 
+ """ + try: + with collection_path.open() as f: + data = json.load(f) + except Exception as exc: + raise RuntimeError( + f"Failed to read collection file {collection_path}: {exc}" + ) from exc + + # If the file itself is a plain list of symbols + if isinstance(data, list): + return [str(s).upper() for s in data] + + # If the file is a dict, try common keys first + if isinstance(data, dict): + # Direct keys that point to a symbols list + for key in ("symbols", "assets", "symbols_list"): + if key in data and isinstance(data[key], list): + return [str(s).upper() for s in data[key]] + + # If the file wraps one or more named collections (e.g., {"bonds": {...}}) + # find the first value that itself contains a 'symbols' list + for val in data.values(): + if isinstance(val, dict): + for key in ("symbols", "assets", "symbols_list"): + if key in val and isinstance(val[key], list): + return [str(s).upper() for s in val[key]] + + raise RuntimeError( + f"Collection JSON at {collection_path} missing 'symbols' list or unsupported format" + ) + + +def expand_strategies(strategies_arg: str) -> List[str]: + # strategies_arg can be comma-separated or 'all' + parts = [p.strip() for p in strategies_arg.split(",") if p.strip()] + if len(parts) == 1 and parts[0].lower() == "all": + # Prefer explicit environment variable or container-mounted path when running inside Docker. + # This avoids trying to read host paths from within the container. 
+ try: + import os + candidates = [] + env_path = os.getenv("STRATEGIES_PATH") + if env_path: + candidates.append(env_path) -def save_optimization_to_database( - symbol: str, - strategy: str, - optimization_result, - timeframe: str = "1d", - portfolio_name: str | None = None, -): - """Save optimization results to PostgreSQL database with new normalized structure.""" - from datetime import datetime + # Common container mount used in docker-compose + candidates.append("/app/external_strategies") - from src.database import get_db_session - from src.database.models import AllOptimizationResult + # Host-local fallback (works when running on host) + candidates.append(str(Path("quant-strategies").resolve())) + candidates.append(str(Path("external_strategies").resolve())) - session = get_db_session() + from src.core.external_strategy_loader import get_strategy_loader + from src.core.strategy import StrategyFactory - try: - # Generate unique run_id for this optimization session - run_id = f"OPT_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{symbol}_{strategy}" - - # Save all optimization iterations to all_optimization_results - for i, iteration_data in enumerate(optimization_result.optimization_history): - # Extract metrics from iteration data - metrics = iteration_data.get("metrics", {}) - parameters = iteration_data.get("parameters", {}) - - opt_record = AllOptimizationResult( - run_id=run_id, - symbol=symbol, - strategy=strategy, - timeframe=timeframe, - parameters=parameters, - sortino_ratio=float(metrics.get("sortino_ratio", 0)), - sharpe_ratio=float(metrics.get("sharpe_ratio", 0)), - calmar_ratio=float(metrics.get("calmar_ratio", 0)), - profit_factor=float(metrics.get("profit_factor", 0)), - total_return=float(metrics.get("total_return", 0)), - max_drawdown=float(metrics.get("max_drawdown", 0)), - volatility=float(metrics.get("volatility", 0)), - win_rate=float(metrics.get("win_rate", 0)), - trades_count=int(metrics.get("trades_count", 0)), - iteration_number=i + 1, - 
optimization_metric="sortino_ratio", - portfolio_name=portfolio_name, + strategies = [] + for cand in candidates: + try: + if not cand: + continue + p = Path(cand) + if not p.exists(): + continue + loader = get_strategy_loader(str(cand)) + try: + strategies = StrategyFactory.list_strategies(loader=loader) + if isinstance(strategies, dict): + strategies = ( + strategies.get("all") + or strategies.get("external") + or [] + ) + except Exception: + strategies = [] + # If we found any, return them (deduplicated & sorted) + if strategies: + return sorted(set(strategies)) + # If loader supports listing candidates without importing, try that + try: + candidates_list = loader.list_strategy_candidates() + if candidates_list: + return sorted(set(candidates_list)) + except Exception: + pass + except Exception as exc: + # try next candidate, but log for diagnostics + log.debug("Strategy discovery failed for %s: %s", cand, exc) + continue + + # Last fallback: try the local algorithms/python dir if present + alt_dir = Path("quant-strategies") / "algorithms" / "python" + if alt_dir.exists(): + cand = [p.stem for p in alt_dir.glob("*.py") if p.is_file()] + if cand: + return sorted(set(cand)) + + # If nothing found, proceed with an empty list (safe default for dry-run/tests) + log.warning( + "Could not expand 'all' strategies: no strategy repository found; proceeding with none" + ) + return [] + except Exception as exc: + log.warning( + "Could not expand 'all' strategies: %s; proceeding with none", exc ) - session.add(opt_record) - - # Update best_optimization_results table - _update_best_optimization_result( - session, - symbol, - strategy, - optimization_result, - run_id, - timeframe, - portfolio_name, + return [] + + # explicit list + expanded: List[str] = [] + for part in parts: + expanded.extend([s.strip() for s in part.split("+") if s.strip()]) + return sorted(set(expanded)) + + +def expand_intervals(interval_arg: str) -> List[str]: + parts = [p.strip() for p in 
interval_arg.split(",") if p.strip()] + if len(parts) == 1 and parts[0].lower() == "all": + return SUPPORTED_INTERVALS.copy() + # validate + invalid = [p for p in parts if p not in SUPPORTED_INTERVALS] + if invalid: + raise RuntimeError( + f"Unknown intervals requested: {invalid}. Supported: {SUPPORTED_INTERVALS}" ) + return parts - session.commit() - - except Exception as e: - session.rollback() - raise e - finally: - session.close() - - -def _update_best_optimization_result( - session, - symbol: str, - strategy: str, - optimization_result, - run_id: str, - timeframe: str, - portfolio_name: str | None = None, -): - """Update best_optimization_results table if this result is better than existing.""" - from src.database.models import BestOptimizationResult - - # Check if there's an existing best result for this symbol/strategy/timeframe - existing = ( - session.query(BestOptimizationResult) - .filter_by(symbol=symbol, strategy=strategy, timeframe=timeframe) - .first() - ) - current_sortino = float(optimization_result.best_score) - - # Only update if this is better (higher score) or no existing record - if not existing or ( - existing.best_sortino_ratio - and current_sortino > float(existing.best_sortino_ratio) - ): - # Extract best metrics from optimization result - best_metrics = {} - if optimization_result.optimization_history: - # Find the iteration with the best score - best_iteration = max( - optimization_result.optimization_history, - key=lambda x: x.get("metrics", {}).get("sortino_ratio", 0), +def clamp_interval_period( + interval: str, start: Optional[str], end: Optional[str], period_mode: str +) -> Dict[str, Optional[str]]: + """ + Enforce provider constraints: + - 1m allowed only for last ONE_MINUTE_MAX_DAYS days + - <1d intraday intervals allowed only for last INTRADAY_MAX_DAYS days + Returns dict with possibly modified 'start'/'end' and 'period_mode' (may remain 'max') + """ + # This function returns the passed args unchanged by default. 
Real clamping requires querying provider + # for available date ranges; here we provide warnings and leave exact clamping to data manager. + if interval == "1m": + # warn user if period_mode == 'max' + if period_mode == "max": + log.warning( + "Interval '1m' may be limited to the last %d days by the data provider", + ONE_MINUTE_MAX_DAYS, ) - best_metrics = best_iteration.get("metrics", {}) - - if existing: - # Update existing record - existing.best_sortino_ratio = current_sortino - existing.best_sharpe_ratio = float(best_metrics.get("sharpe_ratio", 0)) - existing.best_calmar_ratio = float(best_metrics.get("calmar_ratio", 0)) - existing.best_profit_factor = float(best_metrics.get("profit_factor", 0)) - existing.best_total_return = float(best_metrics.get("total_return", 0)) - existing.best_max_drawdown = float(best_metrics.get("max_drawdown", 0)) - existing.best_volatility = float(best_metrics.get("volatility", 0)) - existing.best_win_rate = float(best_metrics.get("win_rate", 0)) - existing.best_trades_count = int(best_metrics.get("trades_count", 0)) - existing.best_parameters = optimization_result.best_parameters - existing.total_iterations = optimization_result.total_evaluations - existing.optimization_time_seconds = optimization_result.optimization_time - existing.optimization_run_id = run_id - existing.portfolio_name = portfolio_name - else: - # Create new record - best_opt_result = BestOptimizationResult( - symbol=symbol, - strategy=strategy, - timeframe=timeframe, - best_sortino_ratio=current_sortino, - best_sharpe_ratio=float(best_metrics.get("sharpe_ratio", 0)), - best_calmar_ratio=float(best_metrics.get("calmar_ratio", 0)), - best_profit_factor=float(best_metrics.get("profit_factor", 0)), - best_total_return=float(best_metrics.get("total_return", 0)), - best_max_drawdown=float(best_metrics.get("max_drawdown", 0)), - best_volatility=float(best_metrics.get("volatility", 0)), - best_win_rate=float(best_metrics.get("win_rate", 0)), - 
best_trades_count=int(best_metrics.get("trades_count", 0)), - best_parameters=optimization_result.best_parameters, - total_iterations=optimization_result.total_evaluations, - optimization_time_seconds=optimization_result.optimization_time, - optimization_run_id=run_id, - portfolio_name=portfolio_name, + elif interval in ("5m", "15m", "1h"): + if period_mode == "max": + log.warning( + "Intraday interval '%s' may be limited to the last %d days by the data provider", + interval, + INTRADAY_MAX_DAYS, ) - session.add(best_opt_result) + return {"start": start, "end": end, "period_mode": period_mode} -def handle_tradingview_export_command(args): - """Handle TradingView alerts export command.""" - from src.database import get_db_session - from src.utils.tradingview_alert_exporter import TradingViewAlertExporter +def write_manifest(outdir: Path, manifest: Dict[str, Any]) -> Path: + outdir.mkdir(parents=True, exist_ok=True) + manifest_path = outdir / "run_manifest.json" + with manifest_path.open("w", encoding="utf-8") as fh: + json.dump(manifest, fh, indent=2, sort_keys=True, ensure_ascii=False) + return manifest_path - try: - # Initialize with database session - db_session = get_db_session() - exporter = TradingViewAlertExporter(db_session=db_session) - print("📺 Exporting TradingView alerts from database...") +def try_get_git_sha(path: Path) -> Optional[str]: + # Try to read git sha for the given path if it's a git repo + git_exe = shutil.which("git") + if git_exe is None: + return None + if not (path / ".git").exists(): + return None + try: + import subprocess - generated_files = exporter.export_from_database( - quarter=args.quarter, year=args.year, output_dir=args.output_dir + out = subprocess.check_output( + [git_exe, "-C", str(path.resolve()), "rev-parse", "HEAD"], + stderr=subprocess.DEVNULL, ) + return out.decode().strip() + except Exception: + return None - if generated_files: - print(f"✅ TradingView alerts exported - {len(generated_files)} files:") - for file_path 
in generated_files: - print(f" 📄 {file_path}") - else: - print("❌ No high-performing strategies found for alerts") - - db_session.close() - - except Exception as e: - print(f"❌ TradingView export failed: {e}") - print("📺 Falling back to HTML report parsing...") - - # Fallback to HTML parsing - exporter = TradingViewAlertExporter() - generated_files = exporter.export_alerts_from_reports() - if generated_files: - print(f"✅ Fallback export completed: {generated_files[0]}") - - -def handle_portfolio_compare(args): - """Handle portfolio comparison command.""" - logger = logging.getLogger(__name__) - - # Load portfolio definitions +def persist_run_row_placeholder(manifest: Dict[str, Any]) -> None: + # Hook: try to persist the initial run row to DB using unified_models if available. try: - with Path(args.portfolios).open() as f: - portfolio_definitions = json.load(f) - except Exception as e: - logger.error("Error loading portfolios: %s", e) - return - - # Setup components - data_manager = UnifiedDataManager() - cache_manager = UnifiedCacheManager() - engine = UnifiedBacktestEngine(data_manager, cache_manager) - portfolio_manager = PortfolioManager() - - # Define all available strategies - all_strategies = ["rsi", "macd", "bollinger_bands", "sma_crossover"] - - # Run backtests for each portfolio - portfolio_results = {} - - for portfolio_name, portfolio_config in portfolio_definitions.items(): - logger.info("Backtesting portfolio: %s", portfolio_name) - - # Use strategies from config if provided, otherwise use all strategies - strategies_to_test = portfolio_config.get("strategies", all_strategies) - - config = BacktestConfig( - symbols=portfolio_config["symbols"], - strategies=strategies_to_test, - start_date=args.start_date, - end_date=args.end_date, - use_cache=True, - ) - - results = engine.run_batch_backtests(config) - portfolio_results[portfolio_name] = results - - # Analyze portfolios - analysis = portfolio_manager.analyze_portfolios(portfolio_results) - - # Display 
comparison - print("\nPortfolio Comparison Analysis") - print("=" * 40) - - for portfolio_name, summary in analysis["portfolio_summaries"].items(): - print(f"\n{portfolio_name.upper()}:") - print(f" Priority Rank: {summary['investment_priority']}") - print(f" Average Return: {summary['avg_return']:.2f}%") - print(f" Sharpe Ratio: {summary['avg_sharpe']:.3f}") - print(f" Risk Category: {summary['risk_category']}") - print(f" Overall Score: {summary['overall_score']:.1f}") + from src.database import unified_models - # Show investment recommendations - print("\nInvestment Recommendations:") - for rec in analysis["investment_recommendations"]: - print(f"\n{rec['priority_rank']}. {rec['portfolio_name']}") - print(f" Allocation: {rec['recommended_allocation_pct']:.1f}%") - print(f" Expected Return: {rec['expected_annual_return']:.2f}%") - print(f" Risk: {rec['risk_category']}") - - # Save results if output specified - if args.output: - with Path(args.output).open("w") as f: - json.dump(analysis, f, indent=2, default=str) - logger.info("Analysis saved to %s", args.output) - - -def handle_investment_plan(args): - """Handle investment plan generation.""" - logger = logging.getLogger(__name__) - - # Load portfolio results - try: - with Path(args.portfolios).open() as f: - portfolio_results_data = json.load(f) - except Exception as e: - logger.error("Error loading portfolio results: %s", e) - return - - # Convert to BacktestResult objects (simplified) - portfolio_results = {} - for portfolio_name, results_list in portfolio_results_data.items(): - results = [] - for result_data in results_list: - result = BacktestResult( - symbol=result_data["symbol"], - strategy=result_data["strategy"], - parameters=result_data.get("parameters", {}), - metrics=result_data.get("metrics", {}), - config=None, # Simplified - error=result_data.get("error"), + # unified_models should expose create_run_from_manifest(manifest) or similar. 
+ if hasattr(unified_models, "create_run_from_manifest"): + unified_models.create_run_from_manifest(manifest) + log.info( + "Persisted run row to DB via unified_models.create_run_from_manifest" ) - results.append(result) - portfolio_results[portfolio_name] = results - - # Generate investment plan - portfolio_manager = PortfolioManager() - investment_plan = portfolio_manager.generate_investment_plan( - args.capital, portfolio_results, args.risk_tolerance - ) - - # Display investment plan - print("\nInvestment Plan") - print("=" * 20) - print(f"Total Capital: ${args.capital:,.2f}") - print(f"Risk Tolerance: {args.risk_tolerance.title()}") - - print("\nCapital Allocations:") - for allocation in investment_plan["allocations"]: - print( - f" {allocation['portfolio_name']}: ${allocation['allocation_amount']:,.2f} " - f"({allocation['allocation_percentage']:.1f}%)" + else: + log.debug( + "unified_models module found but create_run_from_manifest not present" + ) + except Exception: + log.debug( + "DB persistence not available (unified_models missing or failed). Continuing without DB." 
) - print("\nExpected Portfolio Metrics:") - expected = investment_plan["expected_portfolio_metrics"] - print(f" Expected Return: {expected.get('expected_annual_return', 0):.2f}%") - print(f" Expected Volatility: {expected.get('expected_volatility', 0):.2f}%") - print(f" Expected Sharpe: {expected.get('expected_sharpe_ratio', 0):.3f}") - - # Save plan if output specified - if args.output: - with Path(args.output).open("w") as f: - json.dump(investment_plan, f, indent=2, default=str) - logger.info("Investment plan saved to %s", args.output) - - -def handle_cache_command(args): - """Handle cache management commands.""" - cache_manager = UnifiedCacheManager() - - if args.cache_command == "stats": - handle_cache_stats(args, cache_manager) - elif args.cache_command == "clear": - handle_cache_clear(args, cache_manager) - else: - print("Available cache commands: stats, clear") +def run_plan(manifest: Dict[str, Any], outdir: Path, dry_run: bool = False) -> int: + """ + Execute the resolved plan. -def handle_cache_stats(args, cache_manager: UnifiedCacheManager): - """Handle cache stats command.""" - stats = cache_manager.get_cache_stats() + This implementation delegates to src.core.direct_backtest.UnifiedBacktestEngine.run if available. + If unavailable, it will write a placeholder summary and return 0 on success. 
+ """ + if dry_run: + print(json.dumps(manifest, indent=2, sort_keys=True, ensure_ascii=False)) + return 0 - print("\nCache Statistics") - print("=" * 20) - print( - f"Total Size: {stats['total_size_gb']:.2f} GB / {stats['max_size_gb']:.2f} GB" - ) - print(f"Utilization: {stats['utilization_percent']:.1f}%") + # Persist a run row (best-effort) + persist_run_row_placeholder(manifest) - print("\nBy Type:") - for cache_type, type_stats in stats["by_type"].items(): - print(f" {cache_type.title()}:") - print(f" Count: {type_stats['count']}") - print(f" Size: {type_stats['total_size_mb']:.1f} MB") + # If action is 'direct', use the direct backtester with DB persistence + try: + plan_action = manifest.get("plan", {}).get("action") + except Exception: + plan_action = None - print("\nBy Source:") - for source, source_stats in stats["by_source"].items(): - print(f" {source.title()}:") - print(f" Count: {source_stats['count']}") - print(f" Size: {source_stats['size_bytes'] / 1024**2:.1f} MB") + if plan_action == "direct": + try: + from src.core.data_manager import UnifiedDataManager + from src.core.direct_backtest import ( + finalize_persistence_for_run, + run_direct_backtest, + ) + except Exception: + log.exception("Direct backtester not available") + return 12 + + plan = manifest.get("plan", {}) + symbols = plan.get("symbols", []) + strategies = plan.get("strategies", []) + intervals = plan.get("intervals", ["1d"]) # usually one + period_mode = plan.get("period_mode", "max") + start = plan.get("start") or "" + end = plan.get("end") or "" + initial_capital = plan.get("initial_capital", 10000) + commission = plan.get("commission", 0.001) + target_metric = plan.get("metric", DEFAULT_METRIC) + plan_hash = plan.get("plan_hash") + + # Initialize external strategies loader when a path is available (container-safe) + try: + from src.core.external_strategy_loader import get_strategy_loader + spath = plan.get("strategies_path") + if spath: + get_strategy_loader(str(spath)) + except 
Exception: + # best-effort; loader may already be initialized elsewhere + pass -def handle_cache_clear(args, cache_manager: UnifiedCacheManager): - """Handle cache clear command.""" - logger = logging.getLogger(__name__) + # Ensure a run row exists + run_id = None + try: + from src.database import unified_models - if args.all: - logger.info("Clearing all cache...") - cache_manager.clear_cache() - else: - logger.info("Clearing cache with filters...") - cache_manager.clear_cache( - cache_type=args.type, - symbol=args.symbol, - source=args.source, - older_than_days=args.older_than, + run_obj = None + if hasattr(unified_models, "ensure_run_for_manifest"): + run_obj = unified_models.ensure_run_for_manifest(manifest) + else: + run_obj = unified_models.create_run_from_manifest(manifest) + run_id = getattr(run_obj, "run_id", None) + except Exception: + run_id = None + + persistence_context = ( + {"run_id": run_id, "target_metric": target_metric, "plan_hash": plan_hash} + if run_id + else None ) - logger.info("Cache cleared successfully") - - -def handle_strategy_command(args): - """Handle strategy management commands.""" - logger = logging.getLogger(__name__) - - if args.strategy_command == "list": - strategies = list_available_strategies() - strategy_type = args.type - - if strategy_type == "all": - print("Available Strategies:") - if strategies["builtin"]: - print(f"\nBuilt-in Strategies ({len(strategies['builtin'])}):") - for strategy in strategies["builtin"]: - print(f" - {strategy}") + # Optional: probe sources for best coverage and set ordering overrides (assume 'stocks' for bonds/ETFs) + try: + dm_probe = UnifiedDataManager() + asset_type_probe = "stocks" + sample_syms = symbols[: min(5, len(symbols))] + if sample_syms: + ordered = dm_probe.probe_and_set_order( + asset_type_probe, + sample_syms, + interval=intervals[0] if intervals else "1d", + ) + if ordered: + log.info( + "Source order override for %s: %s", asset_type_probe, ordered + ) + except Exception: + 
log.debug("Coverage probe failed; continuing with default ordering") - if strategies["external"]: - print(f"\nExternal Strategies ({len(strategies['external'])}):") - for strategy in strategies["external"]: - print(f" - {strategy}") - else: - strategy_list = strategies.get(strategy_type, []) - print(f"{strategy_type.title()} Strategies ({len(strategy_list)}):") - for strategy in strategy_list: - print(f" - {strategy}") + for interval in intervals: + for symbol in symbols: + for strat in strategies: + try: + _ = run_direct_backtest( + symbol=symbol, + strategy_name=strat, + start_date=start, + end_date=end, + timeframe=interval, + initial_capital=float(initial_capital), + commission=float(commission), + period=(period_mode if period_mode else None), + use_cache=bool(plan.get("use_cache", True)), + persistence_context=persistence_context, + ) + except Exception: + log.exception( + "Direct backtest failed for %s %s %s", + symbol, + strat, + interval, + ) + continue - elif args.strategy_command == "info": - try: - info = StrategyFactory.get_strategy_info(args.name) - print(f"Strategy: {info['name']}") - print(f"Type: {info['type']}") - print(f"Description: {info['description']}") - - if info.get("parameters"): - print("\nParameters:") - for param, value in info["parameters"].items(): - print(f" {param}: {value}") - except ValueError as e: - logger.error("Strategy not found: %s", e) - - elif args.strategy_command == "test": + # Finalize DB ranks/best strategy try: - # Parse parameters if provided - parameters = {} - if args.parameters: - parameters = json.loads(args.parameters) - - # Create strategy instance - strategy = StrategyFactory.create_strategy(args.name, parameters) - - # Get test data - data_manager = UnifiedDataManager() - logger.info("Fetching test data for %s...", args.symbol) - - data = data_manager.fetch_data( - symbol=args.symbol, - start_date=args.start_date, - end_date=args.end_date, - interval="1d", + if persistence_context: + 
finalize_persistence_for_run( + persistence_context.get("run_id"), target_metric + ) + except Exception: + log.exception( + "Finalization failed for run %s", + (persistence_context or {}).get("run_id"), ) - if data.empty: - logger.error("No data found for %s", args.symbol) - return - - # Generate signals - logger.info("Generating signals...") - signals = strategy.generate_signals(data) + return 0 - # Print summary - signal_counts = { - "Buy": (signals == 1).sum(), - "Sell": (signals == -1).sum(), - "Hold": (signals == 0).sum(), - } - - print(f"\nStrategy Test Results for {args.name}:") - print(f"Symbol: {args.symbol}") - print(f"Period: {args.start_date} to {args.end_date}") - print(f"Data points: {len(data)}") - print("Signal distribution:") - for signal_type, count in signal_counts.items(): - percentage = (count / len(signals)) * 100 - print(f" {signal_type}: {count} ({percentage:.1f}%)") - - # Show recent signals - recent_signals = signals.tail(10) - print("\nRecent signals:") - for date, signal in recent_signals.items(): - signal_name = {1: "BUY", -1: "SELL", 0: "HOLD"}[signal] - print(f" {date.strftime('%Y-%m-%d')}: {signal_name}") - - except Exception as e: - logger.error("Strategy test failed: %s", e) - - -def handle_csv_export_command(args): - """Handle CSV export command.""" - from src.utils.raw_data_csv_exporter import RawDataCSVExporter - - # Only do collection-based exports (no portfolio_raw_data.csv) - exporter = RawDataCSVExporter() - - # Show available columns if requested - if hasattr(args, "columns") and args.columns and "available" in args.columns: - print("Available columns for CSV export:") - for col in [ - "Symbol", - "Strategy", - "Sortino Ratio", - "Calmar Ratio", - "Total Return", - "Max Drawdown", - "Win Rate", - ]: - print(f" - {col}") - return - - # Handle quarterly exports from existing reports - if args.format == "quarterly": - if not args.quarter or not args.year: - print("❌ Quarter and year required for quarterly export") - return - - 
print(f"📊 Extracting data from database: {args.year} {args.quarter}") - - # Extract portfolio name from portfolio file if provided - portfolio_name = "all" - if args.portfolio: - portfolio_path = Path(args.portfolio) - if portfolio_path.exists(): - import json + # Delegate to engine if available (use the unified backtest engine implementation) + try: + from src.core.backtest_engine import UnifiedBacktestEngine + # The Backtest Engine class expects different init args; instantiate and run batch if available. + engine = UnifiedBacktestEngine() + # If engine exposes a run() method accepting manifest/outdir, prefer that; otherwise, run a batch run. + if hasattr(engine, "run"): + try: + res = engine.run(manifest=manifest, outdir=outdir) # type: ignore[attr-defined] + log.info( + "Engine run finished with result: %s", + getattr(res, "status", "unknown"), + ) + # Best-effort: if engine returned a summary dict, persist it to the outdir try: - with portfolio_path.open() as f: - portfolio_config = json.load(f) - portfolio_key = list(portfolio_config.keys())[0] - portfolio_name = portfolio_key + import json as _json # local import + + summary_path = Path(outdir) / "engine_run_summary.json" + if isinstance(res, dict): + try: + summary_path.parent.mkdir(parents=True, exist_ok=True) + with summary_path.open("w", encoding="utf-8") as fh: + _json.dump( + res, + fh, + indent=2, + sort_keys=True, + ensure_ascii=False, + ) + log.info("Wrote engine summary to %s", summary_path) + except Exception: + log.exception( + "Failed to write engine summary to %s", summary_path + ) except Exception: - portfolio_name = portfolio_path.stem - - output_paths = exporter.export_from_database_primary( - args.quarter, - args.year, - args.output, - "quarterly", - portfolio_name, - args.portfolio, - ) - - if output_paths: - print(f"✅ CSV export completed - {len(output_paths)} files:") - for path in output_paths: - print(f" 📄 {path}") - else: - print("❌ No quarterly reports found or CSV export failed") - 
return - - # Handle best-strategies format - requires quarter and year - if args.format == "best-strategies": - if not args.quarter or not args.year: - print("❌ Best strategies export requires --quarter and --year") - print("💡 Use: --format best-strategies --quarter Q3 --year 2025") - return + log.debug( + "Engine returned non-dict or failed to write summary (continuing)" + ) + return 0 + except Exception: + # fall back to batch behavior below + pass + # Fall back: attempt to run batch backtests using run_batch_backtests if manifest is compatible try: - print( - f"📊 Exporting best strategies from database: {args.year} {args.quarter}" - ) - - # Extract portfolio name from portfolio file if provided - portfolio_name = "all" - if args.portfolio: - portfolio_path = Path(args.portfolio) - if portfolio_path.exists(): - import json - - try: - with portfolio_path.open() as f: - portfolio_config = json.load(f) - portfolio_key = list(portfolio_config.keys())[0] - portfolio_name = portfolio_key - except Exception: - portfolio_name = portfolio_path.stem - - output_paths = exporter.export_from_database_primary( - quarter=args.quarter, - year=args.year, - output_filename=args.output, - export_format="best-strategies", - portfolio_name=portfolio_name, - portfolio_path=args.portfolio, - ) - - if output_paths: - print( - f"✅ Best strategies CSV export completed - {len(output_paths)} files:" + plan = manifest.get("plan", {}) + config_kwargs = { + "symbols": plan.get("symbols", []), + "strategies": plan.get("strategies", []), + "start_date": plan.get("start"), + "end_date": plan.get("end"), + "initial_capital": plan.get("initial_capital", 10000), + "interval": plan.get("intervals", ["1d"])[0] + if plan.get("intervals") + else "1d", + "max_workers": plan.get("max_workers", 4), + } + # Use BacktestConfig dataclass if available + try: + from src.core.backtest_engine import ( + BacktestConfig, # type: ignore[import-not-found] ) - for path in output_paths: - print(f" 📄 {path}") - else: - 
print("❌ No data found for export criteria") - return + cfg = BacktestConfig(**config_kwargs) + results = engine.run_batch_backtests(cfg) + log.info( + "Engine run_batch_backtests finished with %d results", len(results) + ) + return 0 + except Exception: + log.debug( + "Could not construct BacktestConfig; skipping engine batch run" + ) + except Exception: + log.debug("Engine fallback path failed") + # If we reach here, engine couldn't be driven programmatically + raise RuntimeError("Engine found but could not be executed with manifest") + except Exception as exc: + log.exception("Backtest engine not available or failed: %s", exc) + + # Fallback: write a minimal summaries JSON + summary = { + "manifest": manifest, + "status": "fallback_no_engine", + "timestamp": datetime.utcnow().isoformat() + "Z", + } + fallback_path = outdir / "run_summary_fallback.json" + with fallback_path.open("w", encoding="utf-8") as fh: + json.dump(summary, fh, indent=2, sort_keys=True, ensure_ascii=False) + log.warning("Wrote fallback summary to %s", fallback_path) + return 0 - except Exception as e: - print(f"❌ Database export failed: {e}") - return - # Handle full format - requires running backtests (fallback to old method) - print("❌ Full format export from portfolio config not implemented yet") - print( - "💡 Use quarterly format with --quarter and --year to extract from existing reports" +def handle_collection_run(argv: Sequence[str]) -> int: + parser = argparse.ArgumentParser( + prog="unified_cli collection", + description="Run unified backtests for a collection", + ) + parser.add_argument( + "collection", + help="Path to collection JSON file or collection key under config/collections", + ) + parser.add_argument( + "--action", + default="direct", + choices=[ + "backtest", + "direct", + "optimization", + "export", + "report", + "tradingview", + ], + help="Action to perform", + ) + parser.add_argument( + "--metric", + default=DEFAULT_METRIC, + help=f"Primary metric used for ranking 
(default: {DEFAULT_METRIC})", + ) + parser.add_argument( + "--strategies", + default="all", + help="Comma-separated strategies or 'all' (default: all)", + ) + period_group = parser.add_mutually_exclusive_group() + period_group.add_argument( + "--period", + default="max", + help="Named period token e.g. 1d, 1mo, 1y, ytd, max (default: max)", + ) + period_group.add_argument("--start", help="ISO start date YYYY-MM-DD") + parser.add_argument( + "--end", help="ISO end date YYYY-MM-DD (required when --start is given)" + ) + parser.add_argument( + "--interval", + default="all", + help="Comma-separated intervals or 'all' (default: all)", + ) + parser.add_argument( + "--no-cache", + action="store_true", + help="Bypass cache reads for data (fetch fresh)", + ) + parser.add_argument( + "--fresh", action="store_true", help="Alias for --no-cache (fetch fresh data)" + ) + parser.add_argument( + "--reset-db", + action="store_true", + help="Danger: drop and recreate DB tables before running", + ) + parser.add_argument( + "--exports", + default="", + help="Comma-separated export types to run (csv,report,tradingview,ai,all)", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Do not perform side effects; print manifest and exit", + ) + parser.add_argument( + "--outdir", + default=None, + help="Output directory for artifacts (default: artifacts/run_)", + ) + parser.add_argument("--log-level", default="INFO", help="Logging level") + parser.add_argument("--config", default=None, help="Path to config file (optional)") + parser.add_argument( + "--force", + action="store_true", + help="Force run even if plan_hash already succeeded", + ) + parser.add_argument( + "--max-workers", type=int, default=4, help="Concurrency for backtests" ) - print("💡 Example: --format quarterly --quarter Q4 --year 2023") - return + args = parser.parse_args(argv) + _setup_logging(args.log_level) -def handle_ai_command(args): - """Handle AI recommendation commands.""" - from 
src.ai.investment_recommendations import AIInvestmentRecommendations - from src.database import get_db_session + try: + collection_path = resolve_collection_path(args.collection) + except Exception as exc: + log.exception("Failed to resolve collection: %s", exc) + return 2 - if not args.ai_command: - print("AI command required. Use --help for options.") - return + try: + symbols = load_collection_symbols(collection_path) + except Exception as exc: + log.exception("Failed to load symbols from collection: %s", exc) + return 3 try: - session = get_db_session() - recommender = AIInvestmentRecommendations(session) - - if args.ai_command == "recommend": - portfolio_rec = recommender.generate_recommendations( - risk_tolerance=args.risk_tolerance, - min_confidence=args.min_confidence, - max_assets=args.max_assets, - quarter=args.quarter, - timeframe=args.timeframe, - ) + strategies = expand_strategies(args.strategies) + # Filter out filesystem artifacts or invalid candidates (e.g., __pycache__) + try: + strategies = [ + s + for s in strategies + if not (isinstance(s, str) and s.strip().startswith("__")) + ] + except Exception: + # Defensive: if filtering fails, keep original list + pass + except Exception as exc: + log.exception("Failed to resolve strategies: %s", exc) + return 4 - # Display based on format - if args.format == "json": - print(json.dumps(_portfolio_to_dict(portfolio_rec), indent=2)) - else: - _display_recommendations(portfolio_rec, args.format) + try: + intervals = expand_intervals(args.interval) + except Exception as exc: + log.exception("Failed to resolve intervals: %s", exc) + return 5 + + # Basic validation for start/end + if args.start and not args.end: + log.error("--end is required when --start is provided") + return 6 + + # compute outdir + ts = datetime.utcnow().strftime("%Y%m%d_%H%M%S") + outdir = Path(args.outdir) if args.outdir else Path("artifacts") / f"run_{ts}" + outdir = outdir.resolve() + + # Collect git SHAs (best-effort) + app_sha = 
try_get_git_sha(Path()) + strat_sha = try_get_git_sha(Path("quant-strategies")) + + # Build plan manifest + resolved_plan = { + "actor": "cli", + "action": args.action, + "collection": str(collection_path), + "symbols": sorted(symbols), + "strategies": sorted(strategies), + "intervals": sorted(intervals), + "metric": args.metric, + "period_mode": args.period if args.start is None else "start_end", + "start": args.start, + "end": args.end, + "exports": args.exports, + "dry_run": bool(args.dry_run), + "use_cache": not (args.no_cache or args.fresh), + "max_workers": int(args.max_workers), + "timestamp_utc": datetime.utcnow().isoformat() + "Z", + "git_sha_app": app_sha, + "git_sha_strat": strat_sha, + } - if args.output: - _save_recommendations(portfolio_rec, args.output, args.format) - print(f"Saved to {args.output}") + # Try to read initial_capital and commission from the collection file + try: + import json as _json + + with collection_path.open() as _fh: + _data = _json.load(_fh) + # Direct keys + ic = None + comm = None + if isinstance(_data, dict): + if "initial_capital" in _data: + ic = _data.get("initial_capital") + if "commission" in _data: + comm = _data.get("commission") + # Named collection wrapper + if (ic is None or comm is None) and _data: + try: + first = next(iter(_data.values())) + if isinstance(first, dict): + ic = first.get("initial_capital", ic) + comm = first.get("commission", comm) + except Exception: + pass + if ic is not None: + resolved_plan["initial_capital"] = float(ic) + if comm is not None: + resolved_plan["commission"] = float(comm) + except Exception: + # Ignore; defaults will be applied downstream + pass + + # Apply interval constraints (best-effort warning only) + for interval in resolved_plan["intervals"]: + _ = clamp_interval_period( + interval, + resolved_plan.get("start"), + resolved_plan.get("end"), + resolved_plan["period_mode"], + ) - elif args.ai_command == "compare": - comparison_df = recommender.get_asset_comparison( - 
args.symbols, args.strategy - ) - if not comparison_df.empty: - print(comparison_df.to_string(index=False)) - else: - print("No data found for specified assets") - - elif args.ai_command == "portfolio_recommend": - portfolio_rec, html_path = recommender.generate_portfolio_recommendations( - portfolio_config_path=args.portfolio, - risk_tolerance=args.risk_tolerance, - min_confidence=args.min_confidence, - max_assets=args.max_assets, - quarter=args.quarter, - timeframe=args.timeframe, - generate_html=not args.no_html, - ) + # Add strategies_path so worker processes can initialize the external strategy loader. + # Prefer an explicit environment variable (STRATEGIES_PATH) when set, then check + # the common container-mounted path (/app/external_strategies), then fall back to + # a local `quant-strategies` checkout or `external_strategies` directory. + # This ensures the CLI works both on the host and inside docker-compose containers. + try: + import os - # Display results - _display_recommendations(portfolio_rec, "table") + env_strat = os.getenv("STRATEGIES_PATH") + if env_strat: + resolved_plan["strategies_path"] = env_strat + else: + # Common mount inside the container used by docker-compose + container_path = Path("/app/external_strategies") + if container_path.exists(): + resolved_plan["strategies_path"] = str(container_path) + else: + # Host fallback: prefer local checkout 'quant-strategies' + strat_path = Path("quant-strategies").resolve() + if strat_path.exists(): + resolved_plan["strategies_path"] = str(strat_path) + else: + ext = Path("external_strategies") + resolved_plan["strategies_path"] = ( + str(ext.resolve()) if ext.exists() else None + ) + except Exception: + resolved_plan["strategies_path"] = None - if html_path: - print(f"\n📄 HTML report generated: {html_path}") + plan_hash = compute_plan_hash(resolved_plan) + resolved_plan["plan_hash"] = plan_hash - elif args.ai_command == "explain": - explanation = recommender.explain_recommendation(args.symbol, 
args.strategy) - if "error" in explanation: - print(f"Error: {explanation['error']}") - else: - print(f"Asset: {args.symbol} ({args.strategy})") - print(f"Summary: {explanation['summary']}") - if explanation.get("strengths"): - print("Strengths:", ", ".join(explanation["strengths"])) - if explanation.get("concerns"): - print("Concerns:", ", ".join(explanation["concerns"])) - print(f"Recommendation: {explanation['recommendation']}") - - session.close() - - except Exception as e: - print(f"AI command failed: {e}") - raise - - -def _display_recommendations(portfolio_rec, format_type): - """Display portfolio recommendations.""" - if format_type == "summary": - print(f"AI Recommendations ({portfolio_rec.risk_profile})") - print(f"Assets: {len(portfolio_rec.recommendations)}") - print(f"Score: {portfolio_rec.total_score:.2f}") - print(f"Confidence: {portfolio_rec.confidence:.1%}") - if portfolio_rec.recommendations: - top = portfolio_rec.recommendations[0] - print(f"Top: {top.symbol} ({top.allocation_percentage:.1f}%)") - else: - print(f"Portfolio Recommendations - {portfolio_rec.risk_profile.title()}") - print("=" * 120) - print( - f"{'Symbol':<12} {'Strategy':<12} {'Style':<6} {'TF':<4} {'Score':<6} {'Alloc%':<6} {'Risk%':<6} {'SL':<6} {'TP':<6} {'Conf%':<6}" - ) - print("=" * 120) - for rec in portfolio_rec.recommendations: - print( - f"{rec.symbol:<12} {rec.strategy:<12} {rec.trading_style:<6} {rec.timeframe:<4} " - f"{rec.score:<6.2f} {rec.allocation_percentage:<6.1f} {rec.risk_per_trade:<6.1f} " - f"{rec.stop_loss_points:<6.0f} {rec.take_profit_points:<6.0f} {rec.confidence:<6.1%}" - ) - print(f"\nAnalysis: {portfolio_rec.overall_reasoning}") - - if portfolio_rec.recommendations: - print("\nTrading Parameters Legend:") - print(" Style: Trading style (swing=≥1h timeframes, scalp=<1h timeframes)") - print(" TF: Timeframe") - print(" Risk%: Risk per trade (% of capital)") - print(" SL: Stop loss (points)") - print(" TP: Take profit (points)") - - -def 
_portfolio_to_dict(portfolio_rec): - """Convert portfolio recommendation to dict.""" - return { - "risk_profile": portfolio_rec.risk_profile, - "total_score": portfolio_rec.total_score, - "confidence": portfolio_rec.confidence, - "recommendations": [ - { - "symbol": rec.symbol, - "strategy": rec.strategy, - "score": rec.score, - "allocation_percentage": rec.allocation_percentage, - "sortino_ratio": rec.sortino_ratio, - "calmar_ratio": rec.calmar_ratio, - "max_drawdown": rec.max_drawdown, - } - for rec in portfolio_rec.recommendations - ], - "overall_reasoning": portfolio_rec.overall_reasoning, - "warnings": portfolio_rec.warnings, + manifest = { + "plan": resolved_plan, + "generated_at": datetime.utcnow().isoformat() + "Z", } + manifest_path = write_manifest(outdir, manifest) + log.info("Wrote run manifest to %s", manifest_path) -def _save_recommendations(portfolio_rec, output_path, format_type): - """Save recommendations to file.""" - Path(output_path).parent.mkdir(parents=True, exist_ok=True) + # Optional: reset DB (dangerous) + if args.reset_db and not args.dry_run: + try: + from src.database import unified_models # type: ignore[import-not-found] - if format_type == "json": - with Path(output_path).open("w") as f: - json.dump(_portfolio_to_dict(portfolio_rec), f, indent=2) - else: - with Path(output_path).open("w") as f: - f.write("AI Investment Recommendations\n") - f.write("=" * 50 + "\n\n") - for rec in portfolio_rec.recommendations: - f.write( - f"{rec.symbol} ({rec.strategy}): {rec.allocation_percentage:.1f}%\n" + unified_models.drop_tables() + unified_models.create_tables() + log.warning( + "Database tables dropped and recreated as requested (--reset-db)" + ) + except Exception: + log.exception("Failed to reset database tables") + return 9 + + # Dry-run behavior: print manifest and optionally generate exports from DB, then exit + if args.dry_run: + print(json.dumps(manifest, indent=2, sort_keys=True, ensure_ascii=False)) + + # If exports were requested 
(e.g., --exports=all or csv,report,tradingview), try to + # produce artifacts using the database-backed reporters/exporters. These operations + # are best-effort and guarded so the CLI still works in minimal environments. + exports_val = resolved_plan.get("exports", "") or "" + try: + exports_list = [ + e.strip().lower() for e in str(exports_val).split(",") if e.strip() + ] + except Exception: + exports_list = [] + + if exports_list: + # Enforce strict single-DB policy: verify primary DB is reachable before attempting any exports. + # If the DB is unreachable, fail fast with a clear error instructing the user to start docker-compose. + try: + from src.database.db_connection import ( + get_sync_engine, # type: ignore[import-not-found] ) + engine = get_sync_engine() + # quick connectivity test + conn = engine.connect() + conn.close() + except Exception as db_exc: + log.exception("Primary database is unreachable: %s", db_exc) + log.error( + "Primary DB must be running (this project expects a single DB managed by docker-compose). " + "Start it with: docker-compose up -d db (or docker-compose up -d) and retry." + ) + # Return non-zero exit code to indicate failure due to missing DB + return 10 -def handle_validation_command(args): - """Handle metrics validation commands.""" - from src.utils.metrics_validator import MetricsValidator - - validator = MetricsValidator() - - if not args.validation_command: - print("Available validation commands: strategy, batch") - return - - if args.validation_command == "strategy": - # Validate single strategy - print(f"Validating metrics for {args.symbol}/{args.strategy}...") - - result = validator.validate_best_strategy_metrics( - args.symbol, args.strategy, args.timeframe, args.tolerance - ) + try: + # Prepare a minimal portfolio config for reporters (name + symbols) + # Prefer portfolio 'name' from the collection JSON when available; fallback to file stem. 
+ portfolio_name = Path(collection_path).stem + try: + import json as _json + + with collection_path.open() as _fh: + _cdata = _json.load(_fh) + if isinstance(_cdata, dict): + # direct name + if isinstance(_cdata.get("name"), str): + portfolio_name = _cdata.get("name") or portfolio_name + else: + # named collection wrapper + first = next(iter(_cdata.values())) if _cdata else None + if isinstance(first, dict) and isinstance( + first.get("name"), str + ): + portfolio_name = first.get("name") or portfolio_name + except Exception: + pass - # Generate and display report - report = validator.generate_validation_report(result) - print(report) + portfolio_config = {"name": portfolio_name, "symbols": sorted(symbols)} - elif args.validation_command == "batch": - # Validate multiple strategies - symbols = args.symbols if args.symbols else None - print(f"Validating {args.limit} best strategies...") - if symbols: - print(f"Filtering by symbols: {symbols}") + # Decide which exports to run + do_report = ("report" in exports_list) or ("all" in exports_list) + do_csv = ("csv" in exports_list) or ("all" in exports_list) + do_tradingview = ("tradingview" in exports_list) or ( + "all" in exports_list + ) + do_ai = ("ai" in exports_list) or ("all" in exports_list) - results = validator.validate_multiple_strategies(symbols, args.limit) + # Generate HTML report from DB (DetailedPortfolioReporter) if requested + if do_report: + try: + from src.reporting.collection_report import ( + DetailedPortfolioReporter, + ) - # Generate and display report - report = validator.generate_validation_report(results) - print(report) + reporter = DetailedPortfolioReporter() + start_date = resolved_plan.get("start") or "" + end_date = resolved_plan.get("end") or "" + report_path = reporter.generate_comprehensive_report( + portfolio_config, + start_date or datetime.utcnow().strftime("%Y-%m-%d"), + end_date or datetime.utcnow().strftime("%Y-%m-%d"), + resolved_plan.get("strategies", []), + 
resolved_plan.get("intervals", ["1d"]), + ) + log.info("Generated HTML report (DB-backed) at %s", report_path) + except Exception: + log.exception( + "DetailedPortfolioReporter not available or failed (skipping HTML report)" + ) - # Save detailed report if requested - if args.output: - with Path(args.output).open("w") as f: - f.write(report) - f.write("\n\n=== Detailed Results ===\n\n") - import json + # Generate CSV exports from DB using RawDataCSVExporter (strict DB-only) if requested + if do_csv: + try: + from src.utils.csv_exporter import RawDataCSVExporter + + # Choose quarter/year from plan start if available, else use current + try: + if resolved_plan.get("start"): + sd = datetime.fromisoformat(resolved_plan.get("start")) + else: + sd = datetime.utcnow() + except Exception: + sd = datetime.utcnow() + quarter = f"Q{((sd.month - 1) // 3) + 1}" + year = str(sd.year) + + csv_exporter = RawDataCSVExporter() + # Choose interval for filenames: prefer '1d' when available + try: + _intervals = list(resolved_plan.get("intervals") or []) + interval = ( + "1d" + if "1d" in _intervals + else (_intervals[0] if _intervals else "1d") + ) + except Exception: + interval = "1d" + # prefer best-strategies format for exports when requested + csv_files = csv_exporter.export_from_database_primary( + quarter, + year, + output_filename=None, + export_format="best-strategies", + portfolio_name=portfolio_config.get("name") or "", + portfolio_path=str(collection_path), + interval=interval, + ) + log.info("Generated CSV exports (DB-backed): %s", csv_files) + except Exception as e_csv: + log.exception("RawDataCSVExporter failed: %s", e_csv) + log.error( + "CSV export failed; primary DB is required for exports." 
+ ) + return 11 - f.write(json.dumps(results, indent=2, default=str)) - print(f"\nDetailed report saved to {args.output}") + # Generate AI recommendations (Markdown + HTML) with unified naming if requested + if do_ai: + try: + from src.ai.investment_recommendations import ( + AIInvestmentRecommendations, + ) + from src.database.db_connection import get_db_session + + ai = AIInvestmentRecommendations(db_session=get_db_session()) + _rec, ai_html_path = ai.generate_portfolio_recommendations( + portfolio_config_path=str(collection_path), + risk_tolerance="moderate", + min_confidence=0.6, + max_assets=10, + quarter=f"{quarter}_{year}", + timeframe=interval, + generate_html=True, + ) + log.info("Generated AI recommendations at %s", ai_html_path) + except Exception: + log.exception("AI recommendations export failed (continuing)") - else: - print(f"Unknown validation command: {args.validation_command}") + # Generate TradingView alerts + if do_tradingview: + try: + from src.utils.tv_alert_exporter import TradingViewAlertExporter + # TradingView exporter expects reports under exports/reports; the reporter will + # have organized the report there via ReportOrganizer. Use default location. 
+ tv_exporter = TradingViewAlertExporter( + reports_dir="exports/reports" + ) + alerts = tv_exporter.export_alerts( + output_file=None, + collection_filter=portfolio_config.get("name"), + interval=interval, + symbols=portfolio_config.get("symbols") or [], + ) + log.info( + "Generated TradingView alerts for %d assets", len(alerts) + ) + except Exception: + log.exception( + "TradingViewAlertExporter not available or failed (skipping TV alerts)" + ) + except Exception: + log.exception("Failed to generate DB-backed exports during dry-run") + return 0 -def handle_reports_command(args): - """Handle report management commands.""" - import os - import sys + # Idempotency: check DB for existing plan_hash if DB available + if not args.force: + try: + from src.database import unified_models - from src.utils.report_organizer import ReportOrganizer + if hasattr(unified_models, "find_run_by_plan_hash"): + existing = unified_models.find_run_by_plan_hash(plan_hash) + if existing and getattr(existing, "status", None) == "succeeded": + log.info( + "A succeeded run with the same plan_hash already exists. Use --force to re-run." 
+ ) + return 0 + except Exception: + log.debug("Could not query DB for existing plan_hash; continuing") - sys.path.append(os.path.dirname(os.path.dirname(__file__))) - from utils.report_organizer import ReportOrganizer + # Execute the plan + rc = run_plan(manifest, outdir, dry_run=args.dry_run) - organizer = ReportOrganizer() + # Best-effort: persist artifact pointers (manifest, engine summary, fallback summary) into unified_models.RunArtifact + try: + from src.database import unified_models # type: ignore[import-not-found] - if args.reports_command == "organize": - print("Organizing existing reports into quarterly structure...") - organizer.organize_existing_reports() - print("Reports organized successfully!") + run = None + try: + if hasattr(unified_models, "find_run_by_plan_hash"): + run = unified_models.find_run_by_plan_hash(plan_hash) + else: + # fallback: query by plan_hash manually + sess_tmp = unified_models.Session() + try: + run = ( + sess_tmp.query(unified_models.Run) + .filter(unified_models.Run.plan_hash == plan_hash) + .one_or_none() + ) + finally: + try: + sess_tmp.close() + except Exception: + pass + except Exception: + log.exception("Failed to locate run for plan_hash %s", plan_hash) + run = None - elif args.reports_command == "list": - reports = organizer.list_quarterly_reports( - args.year if hasattr(args, "year") else None + if run: + sess = unified_models.Session() + try: + artifact_candidates = [ + ("manifest", manifest_path), + ("engine_summary", outdir / "engine_run_summary.json"), + ("run_summary_fallback", outdir / "run_summary_fallback.json"), + ] + added = 0 + for atype, p in artifact_candidates: + try: + # Only persist existing artifact files (handle Path objects) + p_path = Path(p) + if p_path.exists(): + ra = unified_models.RunArtifact( + run_id=getattr(run, "run_id", None), + artifact_type=atype, + path_or_uri=str(p_path), + meta=None, + ) + sess.add(ra) + added += 1 + else: + log.debug("Artifact file not present, skipping: %s", 
p_path) + except Exception: + log.exception("Failed to add RunArtifact entry for %s", p) + if added: + sess.commit() + # Log number of artifacts added for visibility + try: + cnt = ( + sess.query(unified_models.RunArtifact) + .filter( + unified_models.RunArtifact.run_id + == getattr(run, "run_id", None) + ) + .count() + ) + log.info( + "Persisted %d run artifact pointers to DB for run %s", + cnt, + getattr(run, "run_id", None), + ) + except Exception: + log.info( + "Persisted run artifact pointers to DB for run %s", + getattr(run, "run_id", None), + ) + else: + sess.rollback() + log.debug( + "No artifact files found to persist for run %s", + getattr(run, "run_id", None), + ) + except Exception: + try: + sess.rollback() + except Exception: + pass + log.exception( + "Failed to persist run artifacts to DB for run %s", + getattr(run, "run_id", None), + ) + finally: + try: + sess.close() + except Exception: + pass + except Exception: + log.debug( + "Unified models not available for run_artifact persistence (continuing)" ) - if not reports: - print("No quarterly reports found.") - return - - for year, quarters in reports.items(): - print(f"\n{year}:") - for quarter, report_files in quarters.items(): - print(f" {quarter}:") - for report_file in report_files: - print(f" - {report_file}") - - elif args.reports_command == "cleanup": - keep_quarters = args.keep_quarters if hasattr(args, "keep_quarters") else 8 - print(f"Cleaning up old reports (keeping last {keep_quarters} quarters)...") - organizer.cleanup_old_reports(keep_quarters) - print("Cleanup completed!") - - elif args.reports_command == "latest": - portfolio_name = args.portfolio - latest_report = organizer.get_latest_report(portfolio_name) - - if latest_report: - print(f"Latest report for '{portfolio_name}': {latest_report}") - else: - print(f"No reports found for portfolio '{portfolio_name}'") - - elif args.reports_command == "export-csv": - handle_csv_export_command(args) - - elif args.reports_command == 
"export-tradingview": - handle_tradingview_export_command(args) + return rc - else: - print( - "Available reports commands: organize, list, cleanup, latest, export-csv, export-tradingview" - ) +def main(argv: Optional[Sequence[str]] = None) -> int: + """Main entrypoint compatible with direct module and top-level dispatch. -def main(): - """Main entry point.""" - parser = create_parser() - args = parser.parse_args() + Behavior: + - If called with 'collection' as a subcommand (e.g. 'collection ...'), + delegate to handle_collection_run with the args after 'collection'. + - If called as part of a larger CLI where other args appear before 'collection', + locate 'collection' in argv and delegate the remainder to handle_collection_run. + - If no args are supplied, print a minimal help summary. + """ + if argv is None: + argv = sys.argv[1:] - if not args.command: + # If no arguments, show basic help + if not argv: + parser = argparse.ArgumentParser( + prog="unified_cli", description="Unified Quant CLI" + ) + parser.add_argument( + "collection", + nargs="?", + help="Run against a collection (see subcommand 'collection')", + ) parser.print_help() - return - - # Setup logging - setup_logging(args.log_level) + return 1 - # Route to appropriate handler + # Locate 'collection' subcommand anywhere in argv try: - if args.command == "data": - handle_data_command(args) - elif args.command == "strategy": - handle_strategy_command(args) - elif args.command == "backtest": - handle_backtest_command(args) - elif args.command == "portfolio": - handle_portfolio_command(args) - elif args.command == "cache": - handle_cache_command(args) - elif args.command == "reports": - handle_reports_command(args) - elif args.command == "ai": - handle_ai_command(args) - elif args.command == "validate": - handle_validation_command(args) - else: - print(f"Unknown command: {args.command}") - parser.print_help() + idx = int(argv.index("collection")) + except ValueError: + idx = -1 + + if idx >= 0: + # Pass 
everything after the 'collection' token to the dedicated handler + return handle_collection_run(argv[idx + 1 :]) - except KeyboardInterrupt: - print("\nOperation interrupted by user") - except Exception as e: - logging.error("Command failed: %s", e) - raise + # No recognized subcommand found + print("Unknown command. Supported: collection") + return 2 if __name__ == "__main__": - main() + raise SystemExit(main()) diff --git a/src/core/__init__.py b/src/core/__init__.py index 011a054..955f1ec 100644 --- a/src/core/__init__.py +++ b/src/core/__init__.py @@ -5,11 +5,27 @@ from __future__ import annotations +# Import core symbols, but guard optional modules so CLI can run even if some +# components are missing in minimal environments (e.g., CI, trimmed installs). from .backtest_engine import UnifiedBacktestEngine from .cache_manager import UnifiedCacheManager from .data_manager import UnifiedDataManager -from .portfolio_manager import PortfolioManager -from .result_analyzer import UnifiedResultAnalyzer + +# Optional components: try to import, but continue if absent +PortfolioManager = None +UnifiedResultAnalyzer = None + +try: + # Portfolio manager was moved from portfolio_manager.py to collection_manager.py. + # Keep public API stable by importing the same symbol from the new module. 
+ from .collection_manager import PortfolioManager # type: ignore[import-not-found] +except Exception: + PortfolioManager = None + +try: + from .result_analyzer import UnifiedResultAnalyzer # type: ignore[import-not-found] +except Exception: + UnifiedResultAnalyzer = None __all__ = [ "PortfolioManager", diff --git a/src/core/backtest_engine.py b/src/core/backtest_engine.py index ce26168..976697b 100644 --- a/src/core/backtest_engine.py +++ b/src/core/backtest_engine.py @@ -11,7 +11,7 @@ import multiprocessing as mp import time import warnings -from dataclasses import asdict, dataclass +from dataclasses import dataclass from datetime import datetime from typing import Any @@ -29,6 +29,193 @@ warnings.filterwarnings("ignore") +# Defaults +from pathlib import Path + +# Default metric used when none specified in manifest +DEFAULT_METRIC = "sortino_ratio" + + +def _run_backtest_worker(args): + """ + Module-level worker for ProcessPoolExecutor to avoid pickling bound methods. + args: (symbol, strategy, cfg_kwargs) + Returns a serializable dict with result metadata. + """ + symbol, strategy, cfg_kwargs = args + try: + # Import inside worker process + from .backtest_engine import ( + BacktestConfig, # type: ignore[import-not-found] + UnifiedBacktestEngine, # type: ignore[import-not-found] + ) + except Exception: + # Fallback if imports fail in worker - return error + return { + "symbol": symbol, + "strategy": strategy, + "error": "Worker imports failed", + } + + try: + # Construct config inside worker (safe to create per-process) + try: + cfg = BacktestConfig(**cfg_kwargs) # type: ignore[call-arg] + except Exception: + # Fallback minimal config object + class _TmpCfg: + def __init__(self, **kw): + self.__dict__.update(kw) + + cfg = _TmpCfg(**cfg_kwargs) + + # Initialize external strategy loader in the worker process if a path was provided. 
+ # This ensures StrategyFactory / external loader can discover strategies without + # relying on the parent process to have initialized the global loader. + try: + strategies_path = None + if isinstance(cfg_kwargs, dict): + strategies_path = cfg_kwargs.get("strategies_path") + else: + strategies_path = getattr(cfg, "strategies_path", None) + + if strategies_path: + try: + from pathlib import Path as _Path # local import + + from .external_strategy_loader import ( + get_strategy_loader, # type: ignore[import-not-found] + ) + + # Try a set of common candidate locations under the provided strategies_path + candidates = [] + try: + candidates.append(strategies_path) + candidates.append( + str(_Path(strategies_path) / "algorithms" / "python") + ) + candidates.append( + str(_Path(strategies_path) / "algorithms" / "original") + ) + except Exception: + pass + + loader_initialized = False + for cand in candidates: + if not cand: + continue + try: + cand_path = _Path(cand) + if cand_path.exists(): + # Initialize the global loader in this worker process using the candidate path + get_strategy_loader(str(cand_path)) + loader_initialized = True + break + except Exception as exc: + # ignore and try next candidate, but log for diagnostics + log = logging.getLogger(__name__) + log.debug( + "Strategy loader init failed for %s: %s", cand, exc + ) + continue + + # As a final attempt, call get_strategy_loader with the original value + if not loader_initialized: + try: + get_strategy_loader(strategies_path) + except Exception as exc: + log = logging.getLogger(__name__) + log.debug("Final strategy loader init failed: %s", exc) + + except Exception: + # Non-fatal: continue without external strategies + pass + except Exception: + pass + + engine = UnifiedBacktestEngine() + res = engine.run_backtest(symbol, strategy, cfg) + # Build serializable payload for parent process + metrics = res.metrics if getattr(res, "metrics", None) is not None else {} + trades_raw = None + equity_raw = None + 
try: + import json as _json + + import pandas as _pd + + trades_obj = getattr(res, "trades", None) + if trades_obj is not None: + if isinstance(trades_obj, _pd.DataFrame): + trades_raw = trades_obj.to_csv(index=False) + else: + try: + trades_raw = _json.dumps(trades_obj) + except Exception: + trades_raw = str(trades_obj) + + eq = getattr(res, "equity_curve", None) + if eq is not None and isinstance(eq, _pd.DataFrame): + equity_raw = eq.to_json(orient="records", date_format="iso") + elif eq is not None: + try: + equity_raw = _json.dumps(eq) + except Exception: + equity_raw = str(eq) + except Exception: + trades_raw = None + equity_raw = None + + # Provide a compact, JSON-friendly summary of the backtest result for persistence/inspection + try: + bt_results_raw = { + "metrics": metrics, + "duration_seconds": getattr(res, "duration_seconds", None), + "data_points": getattr(res, "data_points", None), + "parameters": getattr(res, "parameters", None), + # include a lightweight final value if available on the result object + "final_value": None, + } + try: + if getattr(res, "equity_curve", None) is not None: + # If equity_curve is a DataFrame, try to capture the last equity point + eq = res.equity_curve + if hasattr(eq, "iloc") and len(eq) > 0: + last_row = eq.iloc[-1] + # try both 'equity' column or the first numeric column + if "equity" in last_row: + bt_results_raw["final_value"] = float(last_row["equity"]) + else: + # pick first numeric-like column + for v in last_row.values: + try: + bt_results_raw["final_value"] = float(v) + break + except Exception as exc: + logging.getLogger(__name__).debug( + "Failed to extract final_value: %s", exc + ) + continue + except Exception: + # best-effort only + pass + except Exception: + bt_results_raw = None + + return { + "symbol": getattr(res, "symbol", symbol), + "strategy": getattr(res, "strategy", strategy), + "metrics": metrics, + "trades_raw": trades_raw, + "equity_raw": equity_raw, + "bt_results_raw": bt_results_raw, + 
"error": getattr(res, "error", None), + "duration_seconds": getattr(res, "duration_seconds", None), + "data_points": getattr(res, "data_points", None), + } + except Exception as exc: + return {"symbol": symbol, "strategy": strategy, "error": str(exc)} + def create_backtesting_strategy_adapter(strategy_instance): """Create a backtesting library compatible strategy from our strategy instance.""" @@ -192,9 +379,15 @@ def run_backtest( if cached_result: self.stats["cache_hits"] += 1 self.logger.debug("Cache hit for %s/%s", symbol, strategy) - return self._dict_to_result( + # Convert cached dict to BacktestResult and mark it as coming from cache + res = self._dict_to_result( cached_result, symbol, strategy, parameters, config ) + try: + res.from_cache = True + except Exception: + pass + return res self.stats["cache_misses"] += 1 @@ -231,10 +424,13 @@ def run_backtest( result = self._execute_backtest(symbol, strategy, data, parameters, config) # Cache result if not using custom parameters - if config.use_cache and not custom_parameters and not result.error: - self.cache_manager.cache_backtest_result( - symbol, strategy, parameters, asdict(result), config.interval - ) + # NOTE: Backtest output caching is disabled to ensure results are always + # recomputed and persisted per-run. Data-level caching (market data) is + # preserved. If desired, re-enable result caching here. 
+ # if config.use_cache and not custom_parameters and not result.error: + # self.cache_manager.cache_backtest_result( + # symbol, strategy, parameters, asdict(result), config.interval + # ) result.duration_seconds = time.time() - start_time result.data_points = len(data) @@ -597,27 +793,110 @@ def _execute_portfolio_backtest( def _process_batch( self, batch: list[tuple[str, str]], config: BacktestConfig ) -> list[BacktestResult]: - """Process batch of symbol/strategy combinations.""" - with concurrent.futures.ProcessPoolExecutor( - max_workers=self.max_workers - ) as executor: - futures = { - executor.submit( - self._run_single_backtest_task, symbol, strategy, config - ): (symbol, strategy) - for symbol, strategy in batch - } + """Process batch of symbol/strategy combinations. - results = [] - for future in concurrent.futures.as_completed(futures): - symbol, strategy = futures[future] - try: - result = future.result() - results.append(result) - except Exception as e: - self.logger.error( - "Batch backtest failed for %s/%s: %s", symbol, strategy, e - ) + Uses a module-level worker to avoid pickling bound methods or objects that + are not serializable by multiprocessing. Each worker constructs its own + engine and runs the single backtest there. 
+ """ + results: list[BacktestResult] = [] + + # Build serializable cfg_kwargs for workers (they will construct BacktestConfig) + for i in range( + 0, len(batch), max(1, len(batch)) + ): # keep batching but here we pass full batch to executor.map + # Prepare args for each (symbol, strategy) + worker_args = [] + for symbol, strategy in batch: + cfg_kwargs = { + "symbols": [symbol], + "strategies": [strategy], + "start_date": config.start_date, + "end_date": config.end_date, + "period": getattr(config, "period", None), + "initial_capital": getattr(config, "initial_capital", 10000), + "interval": getattr(config, "interval", "1d"), + "max_workers": getattr(config, "max_workers", None), + # propagate strategies_path from parent cfg (may be present on _TmpCfg) + "strategies_path": getattr(config, "strategies_path", None), + # include commonly expected config attributes so worker-side _TmpCfg has them + "use_cache": getattr(config, "use_cache", True), + "commission": getattr(config, "commission", 0.001), + "save_trades": getattr(config, "save_trades", False), + "save_equity_curve": getattr(config, "save_equity_curve", False), + # Additional worker-facing attributes to avoid attribute errors in fallback _TmpCfg + "override_old_trades": getattr(config, "override_old_trades", True), + "memory_limit_gb": getattr(config, "memory_limit_gb", 8.0), + "asset_type": getattr(config, "asset_type", None), + "futures_mode": getattr(config, "futures_mode", False), + "leverage": getattr(config, "leverage", 1.0), + } + worker_args.append((symbol, strategy, cfg_kwargs)) + + # Use ProcessPoolExecutor with module-level worker to avoid pickling issues + try: + with concurrent.futures.ProcessPoolExecutor( + max_workers=self.max_workers + ) as executor: + for worker_res in executor.map(_run_backtest_worker, worker_args): + # worker_res is a serializable dict + sym = worker_res.get("symbol") + strat = worker_res.get("strategy") + err = worker_res.get("error") + metrics = 
worker_res.get("metrics", {}) or {} + duration = worker_res.get("duration_seconds", None) + data_points = worker_res.get("data_points", None) + + if err: + self.logger.error( + "Batch backtest failed for %s/%s: %s", sym, strat, err + ) + self.stats["errors"] += 1 + results.append( + BacktestResult( + symbol=sym or "", + strategy=strat or "", + parameters={}, + config=config, + metrics={}, + error=err, + ) + ) + else: + # Construct a minimal BacktestResult for downstream processing + br = BacktestResult( + symbol=sym or "", + strategy=strat or "", + parameters={}, + config=config, + metrics=metrics, + trades=worker_res.get("trades_raw"), + start_date=getattr(config, "start_date", None), + end_date=getattr(config, "end_date", None), + duration_seconds=duration or 0, + data_points=int(data_points) + if data_points is not None + else 0, + error=None, + ) + # Attach raw backtest payloads if present so engine.run can persist them + try: + br.bt_results_raw = worker_res.get( + "bt_results_raw", None + ) + except Exception: + pass + # Reflect worker-level cache hits in parent engine stats + try: + if worker_res.get("cache_hit"): + self.stats["cache_hits"] += 1 + except Exception: + pass + results.append(br) + except Exception as e: + self.logger.error("Failed to execute worker batch: %s", e) + # Convert all batch items to error BacktestResult + for symbol, strategy in batch: self.stats["errors"] += 1 results.append( BacktestResult( @@ -630,7 +909,10 @@ def _process_batch( ) ) - return results + # we've processed the whole provided batch once; break + break + + return results def _run_single_backtest_task( self, symbol: str, strategy: str, config: BacktestConfig @@ -1039,6 +1321,432 @@ def get_performance_stats(self) -> dict[str, Any]: """Get engine performance statistics.""" return self.stats.copy() + def run(self, manifest: dict[str, Any], outdir: Path | str) -> dict[str, Any]: + """ + Manifest-driven executor. 
+ + This method expands the provided manifest (as produced by the CLI) into + BacktestConfig objects and runs batch backtests for each requested + (interval x strategies x symbols) combination. Results are persisted + to the DB via src.database.unified_models (best-effort). + + Returns a summary dict with counts and plan_hash. + """ + import json as _json + from pathlib import Path as _Path + + outdir = _Path(outdir) + outdir.mkdir(parents=True, exist_ok=True) + + plan = manifest.get("plan", {}) + symbols = plan.get("symbols", []) or [] + strategies = plan.get("strategies", []) or [] + intervals = plan.get("intervals", []) or ["1d"] + start = plan.get("start") + end = plan.get("end") + period_mode = plan.get("period_mode", "max") + plan_hash = plan.get("plan_hash") + target_metric = plan.get("metric", DEFAULT_METRIC) + + # Resolve period_mode -> if 'max' leave start/end None so data manager uses full range + if period_mode == "max": + start_date = None + end_date = None + else: + start_date = start + end_date = end + + # Create run row in DB (best-effort) + run_obj = None + run_id = None + try: + from src.database import unified_models # type: ignore[import-not-found] + + try: + # Prefer robust ensure_run_for_manifest which will attempt fallback creation + if hasattr(unified_models, "ensure_run_for_manifest"): + run_obj = unified_models.ensure_run_for_manifest(manifest) + else: + run_obj = unified_models.create_run_from_manifest(manifest) + run_id = getattr(run_obj, "run_id", None) + except Exception: + run_obj = None + run_id = None + except Exception: + run_obj = None + run_id = None + + # Prepare a persistence_context passed to lower-level helpers + # If run_id couldn't be created/resolved, disable persistence to avoid null run_id inserts. 
+ if run_id is None: + persistence_context = None + else: + persistence_context = { + "run_id": run_id, + "target_metric": target_metric, + "plan_hash": plan_hash, + } + + total_results = 0 + errors = 0 + persisted = 0 + results_summary = [] + + # For each interval, create a BacktestConfig and run batch backtests + for interval in intervals: + try: + # Respect export flags from manifest so workers capture trades/equity when requested. + exports = plan.get("exports", []) or [] + if isinstance(exports, str): + exports = [exports] + + # Capture trades by default when DB persistence is active so we can store + # detailed executions into unified_models (trades table and trades_raw). + # Still honor explicit exports flags when provided. + save_trades = ( + "all" in exports + or "trades" in exports + or "trade" in exports + or (persistence_context is not None) + ) + save_equity = ( + "all" in exports or "equity" in exports or "equity_curve" in exports + ) + + cfg_kwargs = { + "symbols": symbols, + "strategies": strategies, + "start_date": start_date, + "end_date": end_date, + "period": period_mode, + "initial_capital": plan.get("initial_capital", 10000), + "interval": interval, + "max_workers": plan.get("max_workers", None), + # propagate strategies_path from manifest so workers can initialize loaders + "strategies_path": plan.get("strategies_path"), + "save_trades": save_trades, + "save_equity_curve": save_equity, + } + # Build BacktestConfig + try: + cfg = BacktestConfig(**cfg_kwargs) + except Exception: + # Fallback: construct minimal config object-like dict + class _TmpCfg: + def __init__(self, **kw): + self.__dict__.update(kw) + + cfg = _TmpCfg(**cfg_kwargs) + + # Ensure fallback config has expected attributes with sensible defaults + # so later code can access them regardless of how cfg was constructed. 
+ _defaults = { + "initial_capital": 10000, + "interval": getattr(cfg, "interval", "1d"), + "max_workers": getattr(cfg, "max_workers", None), + "use_cache": getattr(cfg, "use_cache", True), + "commission": getattr(cfg, "commission", 0.001), + "save_trades": getattr(cfg, "save_trades", False), + "save_equity_curve": getattr(cfg, "save_equity_curve", False), + "override_old_trades": getattr(cfg, "override_old_trades", True), + "memory_limit_gb": getattr(cfg, "memory_limit_gb", 8.0), + "asset_type": getattr(cfg, "asset_type", None), + "futures_mode": getattr(cfg, "futures_mode", False), + "leverage": getattr(cfg, "leverage", 1.0), + } + for _k, _v in _defaults.items(): + if not hasattr(cfg, _k): + try: + setattr(cfg, _k, _v) + except Exception: + # be defensive if cfg disallows setattr + pass + + # Run batch + batch_results = self.run_batch_backtests(cfg) + total_results += len(batch_results) + + # Persist individual results (best-effort) using direct_backtest helper + try: + import src.core.direct_backtest as direct_mod # type: ignore[import-not-found] + + # Only attempt persistence when we have a valid persistence_context (run_id resolved) + if persistence_context: + for r in batch_results: + # Map BacktestResult dataclass to expected dict for persistence + rd = { + "symbol": r.symbol, + "strategy": r.strategy, + "timeframe": getattr(r.config, "interval", interval), + "metrics": r.metrics or {}, + "trades": r.trades if hasattr(r, "trades") else None, + "bt_results": getattr(r, "bt_results_raw", None), + "start_date": getattr(r, "start_date", start_date), + "end_date": getattr(r, "end_date", end_date), + "error": getattr(r, "error", None), + } + + # Force a persistence stub if worker returned no metrics/trades/bt_results + # but did not set an explicit error. This ensures full lineage for the run. 
+ if ( + not rd.get("metrics") + and not rd.get("trades") + and not rd.get("bt_results") + and not rd.get("error") + ): + rd["error"] = "no_result" + + try: + direct_mod._persist_result_to_db( + rd, persistence_context + ) + persisted += 1 + except Exception: + errors += 1 + else: + # Persistence disabled (no run_id); skip storing individual results + pass + except Exception: + # If persistence helper unavailable, skip persistence but continue + pass + + # Summarize top strategies for this interval + for r in batch_results[:5]: + results_summary.append( + { + "symbol": r.symbol, + "strategy": r.strategy, + "interval": getattr(r.config, "interval", interval), + "metric": (r.metrics or {}).get(target_metric), + "error": getattr(r, "error", None), + } + ) + + except Exception as e: + errors += 1 + logging.getLogger(__name__).exception( + "Failed running interval %s: %s", interval, e + ) + continue + + summary = { + "plan_hash": plan_hash, + "total_results": total_results, + "persisted": persisted, + "errors": errors, + "results_sample": results_summary, + } + + # Best-effort: finalize ranks/aggregates and upsert BestStrategy rows into unified_models + try: + if run_id is not None and target_metric: + try: + from src.database import ( + unified_models, # type: ignore[import-not-found] + ) + + sess = unified_models.Session() + try: + # Get distinct symbols for run + symbols = ( + sess.query(unified_models.BacktestResult.symbol) + .filter(unified_models.BacktestResult.run_id == run_id) + .distinct() + .all() + ) + symbols = [s[0] for s in symbols] + + def _is_higher_better(metric_name: str) -> bool: + mn = (metric_name or "").lower() + if "drawdown" in mn or "max_drawdown" in mn or "mdd" in mn: + return False + return True + + for symbol in symbols: + rows = ( + sess.query(unified_models.BacktestResult) + .filter( + unified_models.BacktestResult.run_id == run_id, + unified_models.BacktestResult.symbol == symbol, + ) + .all() + ) + + entries = [] + higher_better = 
_is_higher_better(target_metric) + for r in rows: + mval = None + try: + if r.metrics and isinstance(r.metrics, dict): + raw = r.metrics.get(target_metric) + mval = None if raw is None else float(raw) + except Exception as exc: + logging.getLogger(__name__).debug( + "Failed to parse metric %s: %s", + target_metric, + exc, + ) + # Treat None as worst + sort_key = ( + float("-inf") + if higher_better + else float("inf") + if mval is None + else mval + ) + if mval is None: + sort_key = ( + float("-inf") if higher_better else float("inf") + ) + entries.append((sort_key, mval is None, r)) + + # Sort and assign ranks + entries.sort(key=lambda x: x[0], reverse=higher_better) + for idx, (_sort_key, _is_null, row) in enumerate(entries): + try: + row.rank_in_symbol = idx + 1 + sess.add(row) + except Exception: + pass + + # Persist SymbolAggregate and BestStrategy for top entry + if entries: + best_row = entries[0][2] + topn = [] + for e in entries[:3]: + r = e[2] + topn.append( + { + "strategy": r.strategy, + "interval": r.interval, + "rank": r.rank_in_symbol, + "metric": None + if r.metrics is None + else r.metrics.get(target_metric), + } + ) + existing_agg = ( + sess.query(unified_models.SymbolAggregate) + .filter( + unified_models.SymbolAggregate.run_id == run_id, + unified_models.SymbolAggregate.symbol == symbol, + unified_models.SymbolAggregate.best_by + == target_metric, + ) + .one_or_none() + ) + summary_json = {"top": topn} + if existing_agg: + existing_agg.best_result = best_row.result_id + existing_agg.summary = summary_json + sess.add(existing_agg) + else: + agg = unified_models.SymbolAggregate( + run_id=run_id, + symbol=symbol, + best_by=target_metric, + best_result=best_row.result_id, + summary=summary_json, + ) + sess.add(agg) + + # Upsert BestStrategy + try: + bs_existing = ( + sess.query(unified_models.BestStrategy) + .filter( + unified_models.BestStrategy.symbol + == symbol, + unified_models.BestStrategy.timeframe + == best_row.interval, + ) + .one_or_none() 
+ ) + + def _num(mdict, key): + try: + if mdict and isinstance(mdict, dict): + v = mdict.get(key) + return ( + float(v) if v is not None else None + ) + except Exception: + return None + return None + + sortino_val = _num( + best_row.metrics, "sortino_ratio" + ) or _num(best_row.metrics, "Sortino_Ratio") + calmar_val = _num( + best_row.metrics, "calmar_ratio" + ) or _num(best_row.metrics, "Calmar_Ratio") + sharpe_val = _num( + best_row.metrics, "sharpe_ratio" + ) or _num(best_row.metrics, "Sharpe_Ratio") + total_return_val = _num( + best_row.metrics, "total_return" + ) or _num(best_row.metrics, "Total_Return") + max_dd_val = _num( + best_row.metrics, "max_drawdown" + ) or _num(best_row.metrics, "Max_Drawdown") + + if bs_existing: + bs_existing.strategy = best_row.strategy + bs_existing.sortino_ratio = sortino_val + bs_existing.calmar_ratio = calmar_val + bs_existing.sharpe_ratio = sharpe_val + bs_existing.total_return = total_return_val + bs_existing.max_drawdown = max_dd_val + bs_existing.backtest_result_id = getattr( + best_row, "result_id", None + ) + bs_existing.updated_at = datetime.utcnow() + sess.add(bs_existing) + else: + bs = unified_models.BestStrategy( + symbol=symbol, + timeframe=best_row.interval, + strategy=best_row.strategy, + sortino_ratio=sortino_val, + calmar_ratio=calmar_val, + sharpe_ratio=sharpe_val, + total_return=total_return_val, + max_drawdown=max_dd_val, + backtest_result_id=getattr( + best_row, "result_id", None + ), + updated_at=datetime.utcnow(), + ) + sess.add(bs) + except Exception: + logging.getLogger(__name__).exception( + "Failed to upsert BestStrategy for %s", symbol + ) + + sess.commit() + finally: + try: + sess.close() + except Exception: + pass + except Exception: + logging.getLogger(__name__).exception( + "Failed to finalize BestStrategy for run %s", run_id + ) + except Exception: + # Non-fatal: continue even if finalization fails + pass + + # Write summary file + try: + summary_path = outdir / "engine_run_summary.json" + with 
summary_path.open("w", encoding="utf-8") as fh: + _json.dump(summary, fh, indent=2, sort_keys=True, ensure_ascii=False) + except Exception: + pass + + return summary + def clear_cache(self, symbol: str | None = None, strategy: str | None = None): """Clear cached results.""" self.cache_manager.clear_cache(cache_type="backtest", symbol=symbol) @@ -1080,36 +1788,165 @@ def _prepare_data_for_backtesting_lib(self, data: pd.DataFrame) -> pd.DataFrame: return None def _extract_metrics_from_bt_results(self, bt_results) -> dict[str, Any]: - """Extract metrics from backtesting library results.""" + """Extract metrics from backtesting library results. + + This function is defensive: backtesting library results may contain pandas + Timestamps/Timedeltas or other non-scalar types. Coerce values to floats + where sensible and fall back to None/0 when conversion fails. + """ + import math + try: + + def _as_float(v): + """Safely coerce a value to float or return None.""" + if v is None: + return None + # Already a float/int + if isinstance(v, (int, float)): + if isinstance(v, bool): + return float(v) + if math.isfinite(v): + return float(v) + return None + # Numpy numeric types + try: + import numpy as _np + + if isinstance(v, _np.generic): + return float(v.item()) + except Exception: + pass + # Pandas Timestamp/Timedelta -> convert to numeric where appropriate + try: + import pandas as _pd + + if isinstance(v, _pd.Timedelta): + # convert to total days as a numeric proxy (timedeltas appear for volatility sometimes) + try: + return float(v.total_seconds()) + except Exception: + return None + if isinstance(v, _pd.Timestamp): + # Timestamp is not numeric; return None + return None + except Exception: + pass + # Strings that may include percent signs or commas + if isinstance(v, str): + try: + s = v.strip().replace("%", "").replace(",", "") + return float(s) + except Exception: + return None + # Fallback: try numeric conversion + try: + return float(v) + except Exception: + return 
None + + def _get_first(keys, default=None): + for k in keys: + try: + if isinstance(bt_results, dict) and k in bt_results: + return bt_results.get(k) + except Exception: + pass + return default + + # Map keys with fallbacks + total_return = ( + _as_float( + _get_first(["Return [%]", "Total_Return", "total_return"], 0.0) + ) + or 0.0 + ) + sharpe = ( + _as_float( + _get_first(["Sharpe Ratio", "Sharpe_Ratio", "sharpe_ratio"], 0.0) + ) + or 0.0 + ) + sortino = ( + _as_float( + _get_first(["Sortino Ratio", "Sortino_Ratio", "sortino_ratio"], 0.0) + ) + or 0.0 + ) + calmar = ( + _as_float( + _get_first(["Calmar Ratio", "Calmar_Ratio", "calmar_ratio"], 0.0) + ) + or 0.0 + ) + max_dd = _as_float( + _get_first(["Max. Drawdown [%]", "Max_Drawdown", "max_drawdown"], 0.0) + ) + max_dd = 0.0 if max_dd is None else abs(max_dd) + volatility = ( + _as_float(_get_first(["Volatility [%]", "volatility"], 0.0)) or 0.0 + ) + num_trades = _get_first(["# Trades", "num_trades", "Trades"], 0) or 0 + try: + num_trades = int(num_trades) + except Exception: + num_trades = 0 + win_rate = _as_float(_get_first(["Win Rate [%]", "win_rate"], 0.0)) or 0.0 + profit_factor = ( + _as_float(_get_first(["Profit Factor", "profit_factor"], 1.0)) or 1.0 + ) + best_trade = ( + _as_float(_get_first(["Best Trade [%]", "best_trade"], 0.0)) or 0.0 + ) + worst_trade = ( + _as_float(_get_first(["Worst Trade [%]", "worst_trade"], 0.0)) or 0.0 + ) + avg_trade = ( + _as_float(_get_first(["Avg. Trade [%]", "avg_trade"], 0.0)) or 0.0 + ) + avg_trade_duration = ( + _as_float( + _get_first(["Avg. 
Trade Duration", "avg_trade_duration"], 0.0) + ) + or 0.0 + ) + start_value = _as_float(_get_first(["Start", "start_value"], 0.0)) or 0.0 + end_value = ( + _as_float( + _get_first(["End", "end_value", "Equity Final [$]"], start_value) + ) + or start_value + ) + buy_hold = ( + _as_float(_get_first(["Buy & Hold Return [%]", "buy_hold_return"], 0.0)) + or 0.0 + ) + exposure = ( + _as_float(_get_first(["Exposure Time [%]", "exposure_time"], 0.0)) + or 0.0 + ) + metrics = { - # Core performance metrics - "total_return": float(bt_results.get("Return [%]", 0.0)), - "sharpe_ratio": float(bt_results.get("Sharpe Ratio", 0.0)), - "sortino_ratio": float(bt_results.get("Sortino Ratio", 0.0)), - "calmar_ratio": float(bt_results.get("Calmar Ratio", 0.0)), - # Risk metrics - "max_drawdown": abs(float(bt_results.get("Max. Drawdown [%]", 0.0))), - "volatility": float(bt_results.get("Volatility [%]", 0.0)), - # Trade metrics - "num_trades": int(bt_results.get("# Trades", 0)), - "win_rate": float(bt_results.get("Win Rate [%]", 0.0)), - "profit_factor": float(bt_results.get("Profit Factor", 1.0)), - # Additional metrics - "best_trade": float(bt_results.get("Best Trade [%]", 0.0)), - "worst_trade": float(bt_results.get("Worst Trade [%]", 0.0)), - "avg_trade": float(bt_results.get("Avg. Trade [%]", 0.0)), - "avg_trade_duration": float(bt_results.get("Avg. 
Trade Duration", 0.0)), - # Portfolio metrics - "start_value": float(bt_results.get("Start", 0.0)), - "end_value": float(bt_results.get("End", 0.0)), - "buy_hold_return": float(bt_results.get("Buy & Hold Return [%]", 0.0)), - # Exposure - "exposure_time": float(bt_results.get("Exposure Time [%]", 0.0)), + "total_return": total_return, + "sharpe_ratio": sharpe, + "sortino_ratio": sortino, + "calmar_ratio": calmar, + "max_drawdown": max_dd, + "volatility": volatility, + "num_trades": num_trades, + "win_rate": win_rate, + "profit_factor": profit_factor, + "best_trade": best_trade, + "worst_trade": worst_trade, + "avg_trade": avg_trade, + "avg_trade_duration": avg_trade_duration, + "start_value": start_value, + "end_value": end_value, + "buy_hold_return": buy_hold, + "exposure_time": exposure, } return metrics - except Exception as e: self.logger.error( "Error extracting metrics from backtesting results: %s", e diff --git a/src/core/backtest_engine_old.py b/src/core/backtest_engine_old.py deleted file mode 100644 index 12726a8..0000000 --- a/src/core/backtest_engine_old.py +++ /dev/null @@ -1,1240 +0,0 @@ -""" -Unified Backtest Engine - Consolidates all backtesting functionality. -Supports single assets, portfolios, parallel processing, and optimization. 
-""" - -from __future__ import annotations - -import concurrent.futures -import gc -import logging -import multiprocessing as mp -import time -import warnings -from dataclasses import asdict, dataclass -from datetime import datetime -from typing import Any - -import numpy as np -import pandas as pd -from backtesting import Backtest -from backtesting.lib import SignalStrategy - -from .cache_manager import UnifiedCacheManager -from .data_manager import UnifiedDataManager -from .result_analyzer import UnifiedResultAnalyzer - -# from numba import jit # Removed for compatibility - - -warnings.filterwarnings("ignore") - -logger = logging.getLogger(__name__) - - -def create_backtesting_strategy_adapter(strategy_instance): - """Create a backtesting library compatible strategy from our strategy instance.""" - - class StrategyAdapter(SignalStrategy): - """Adapter to make our strategies work with the backtesting library.""" - - def init(self): - """Initialize the strategy with our custom logic.""" - # Get the data in the format our strategies expect (uppercase columns) - strategy_data = pd.DataFrame( - { - "Open": self.data.Open, - "High": self.data.High, - "Low": self.data.Low, - "Close": self.data.Close, - "Volume": self.data.Volume, - }, - index=self.data.index, - ) - - # Generate signals using our strategy - try: - signals = strategy_instance.generate_signals(strategy_data) - # Ensure signals are aligned with data index - if isinstance(signals, pd.Series): - aligned_signals = signals.reindex(self.data.index, fill_value=0) - else: - aligned_signals = pd.Series( - signals, index=self.data.index, dtype=float - ) - - # Store signals as numpy array for proper indexing - self.signal_array = aligned_signals.values - self.current_bar = 0 # Track current bar manually - - except Exception as e: - # If strategy fails, create zero signals - logger.warning("Strategy signal generation failed: %s", e) - self.signal_array = np.array([0] * len(self.data)) - self.current_bar = 0 - - def 
next(self): - """Execute trades based on our strategy signals.""" - # Use manual bar tracking for reliable signal indexing - if hasattr(self, "signal_array") and self.current_bar < len( - self.signal_array - ): - current_signal = self.signal_array[self.current_bar] - - if current_signal == 1 and not self.position: - # Buy signal and no position - go long - self.buy() - elif current_signal == -1 and self.position: - # Sell signal and have position - close position - self.sell() - elif current_signal == -1 and not self.position: - # Sell signal and no position - go short (if allowed) - try: - self.sell() - except: - pass # Shorting not allowed or failed - - # Increment bar counter for next call - self.current_bar += 1 - - return StrategyAdapter - - -@dataclass -class BacktestConfig: - """Configuration for backtest runs.""" - - symbols: list[str] - strategies: list[str] - start_date: str - end_date: str - initial_capital: float = 10000 - interval: str = "1d" - commission: float = 0.001 - use_cache: bool = True - save_trades: bool = False - save_equity_curve: bool = False - override_old_trades: bool = ( - True # Whether to clean up old trades for same symbol/strategy - ) - memory_limit_gb: float = 8.0 - max_workers: int = None - asset_type: str = None # 'stocks', 'crypto', 'forex', etc. - futures_mode: bool = False # For crypto futures - leverage: float = 1.0 # For futures trading - - -@dataclass -class BacktestResult: - """Standardized backtest result.""" - - symbol: str - strategy: str - parameters: dict[str, Any] - metrics: dict[str, float] - config: BacktestConfig - equity_curve: pd.DataFrame | None = None - trades: pd.DataFrame | None = None - start_date: str = None - end_date: str = None - duration_seconds: float = 0 - data_points: int = 0 - error: str | None = None - source: str | None = None - - -class UnifiedBacktestEngine: - """ - Unified backtesting engine that consolidates all backtesting functionality. 
- Supports single assets, portfolios, parallel processing, and various asset types. - """ - - def __init__( - self, - data_manager: UnifiedDataManager = None, - cache_manager: UnifiedCacheManager = None, - max_workers: int | None = None, - memory_limit_gb: float = 8.0, - ): - self.data_manager = data_manager or UnifiedDataManager() - self.cache_manager = cache_manager or UnifiedCacheManager() - self.result_analyzer = UnifiedResultAnalyzer() - - self.max_workers = max_workers or min(mp.cpu_count(), 8) - self.memory_limit_bytes = int(memory_limit_gb * 1024**3) - - self.logger = logging.getLogger(__name__) - self.stats = { - "backtests_run": 0, - "cache_hits": 0, - "cache_misses": 0, - "errors": 0, - "total_time": 0, - } - - def run_backtest( - self, - symbol: str, - strategy: str, - config: BacktestConfig, - custom_parameters: dict[str, Any] | None = None, - ) -> BacktestResult: - """ - Run backtest for a single symbol/strategy combination. - - Args: - symbol: Symbol to backtest - strategy: Strategy name - config: Backtest configuration - custom_parameters: Custom strategy parameters - - Returns: - BacktestResult object - """ - start_time = time.time() - - try: - # Get strategy parameters - parameters = custom_parameters or self._get_default_parameters(strategy) - - # Check cache first - if config.use_cache and not custom_parameters: - cached_result = self.cache_manager.get_backtest_result( - symbol, strategy, parameters, config.interval - ) - if cached_result: - self.stats["cache_hits"] += 1 - self.logger.debug("Cache hit for %s/%s", symbol, strategy) - return self._dict_to_result( - cached_result, symbol, strategy, parameters, config - ) - - self.stats["cache_misses"] += 1 - - # Get market data - if config.futures_mode: - data = self.data_manager.get_crypto_futures_data( - symbol, - config.start_date, - config.end_date, - config.interval, - config.use_cache, - ) - else: - data = self.data_manager.get_data( - symbol, - config.start_date, - config.end_date, - 
config.interval, - config.use_cache, - config.asset_type, - ) - - if data is None or data.empty: - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics={}, - error="No data available", - ) - - # Run backtest - result = self._execute_backtest(symbol, strategy, data, parameters, config) - - # Cache result if not using custom parameters - if config.use_cache and not custom_parameters and not result.error: - self.cache_manager.cache_backtest_result( - symbol, strategy, parameters, asdict(result), config.interval - ) - - result.duration_seconds = time.time() - start_time - result.data_points = len(data) - self.stats["backtests_run"] += 1 - - return result - - except Exception as e: - self.stats["errors"] += 1 - self.logger.error("Backtest failed for %s/%s: %s", symbol, strategy, e) - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=custom_parameters or {}, - config=config, - metrics={}, - error=str(e), - duration_seconds=time.time() - start_time, - ) - - def run_batch_backtests(self, config: BacktestConfig) -> list[BacktestResult]: - """ - Run backtests for multiple symbols and strategies in parallel. 
- - Args: - config: Backtest configuration - - Returns: - List of backtest results - """ - start_time = time.time() - self.logger.info( - "Starting batch backtest: %d symbols, %d strategies", - len(config.symbols), - len(config.strategies), - ) - - # Generate all symbol/strategy combinations - combinations = [ - (symbol, strategy) - for symbol in config.symbols - for strategy in config.strategies - ] - - self.logger.info("Total combinations: %d", len(combinations)) - - # Process in batches to manage memory - batch_size = self._calculate_batch_size( - len(config.symbols), config.memory_limit_gb - ) - results = [] - - for i in range(0, len(combinations), batch_size): - batch = combinations[i : i + batch_size] - self.logger.info( - "Processing batch %d/%d", - i // batch_size + 1, - (len(combinations) - 1) // batch_size + 1, - ) - - batch_results = self._process_batch(batch, config) - results.extend(batch_results) - - # Force garbage collection between batches - gc.collect() - - self.stats["total_time"] = time.time() - start_time - self._log_stats() - - return results - - def run_portfolio_backtest( - self, config: BacktestConfig, weights: dict[str, float] | None = None - ) -> BacktestResult: - """ - Run portfolio backtest with multiple assets. 
- - Args: - config: Backtest configuration - weights: Asset weights (if None, equal weights used) - - Returns: - Portfolio backtest result - """ - start_time = time.time() - - if not config.strategies or len(config.strategies) != 1: - raise ValueError("Portfolio backtest requires exactly one strategy") - - strategy = config.strategies[0] - - try: - # Get data for all symbols - all_data = self.data_manager.get_batch_data( - config.symbols, - config.start_date, - config.end_date, - config.interval, - config.use_cache, - config.asset_type, - ) - - if not all_data: - return BacktestResult( - symbol="PORTFOLIO", - strategy=strategy, - parameters={}, - config=config, - metrics={}, - error="No data available for any symbol", - ) - - # Calculate equal weights if not provided - if not weights: - weights = {symbol: 1.0 / len(all_data) for symbol in all_data} - - # Normalize weights - total_weight = sum(weights.values()) - weights = {k: v / total_weight for k, v in weights.items()} - - # Run portfolio backtest - portfolio_result = self._execute_portfolio_backtest( - all_data, strategy, weights, config - ) - - portfolio_result.duration_seconds = time.time() - start_time - return portfolio_result - - except Exception as e: - self.logger.error("Portfolio backtest failed: %s", e) - return BacktestResult( - symbol="PORTFOLIO", - strategy=strategy, - parameters={}, - config=config, - metrics={}, - error=str(e), - duration_seconds=time.time() - start_time, - ) - - def run_incremental_backtest( - self, - symbol: str, - strategy: str, - config: BacktestConfig, - last_update: datetime | None = None, - ) -> BacktestResult | None: - """ - Run incremental backtest - only process new data since last run. 
- - Args: - symbol: Symbol to backtest - strategy: Strategy name - config: Backtest configuration - last_update: Last update timestamp - - Returns: - BacktestResult or None if no new data - """ - # Check if we have cached results - parameters = self._get_default_parameters(strategy) - cached_result = self.cache_manager.get_backtest_result( - symbol, strategy, parameters, config.interval - ) - - if cached_result and not last_update: - self.logger.info("Using cached result for %s/%s", symbol, strategy) - return self._dict_to_result( - cached_result, symbol, strategy, parameters, config - ) - - # Get data and check if we need to update - data = self.data_manager.get_data( - symbol, - config.start_date, - config.end_date, - config.interval, - config.use_cache, - config.asset_type, - ) - - if data is None or data.empty: - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics={}, - error="No data available", - ) - - # Check if we have new data since last cached result - if cached_result and last_update: - last_data_point = pd.to_datetime( - cached_result.get("end_date", config.start_date) - ) - # Ensure timezone compatibility for comparison - if data.index[-1].tz is not None and last_data_point.tz is None: - last_data_point = last_data_point.tz_localize(data.index[-1].tz) - elif data.index[-1].tz is None and last_data_point.tz is not None: - last_data_point = last_data_point.tz_localize(None) - - if data.index[-1] <= last_data_point: - self.logger.info("No new data for %s/%s", symbol, strategy) - return self._dict_to_result( - cached_result, symbol, strategy, parameters, config - ) - - # Run backtest - return self.run_backtest(symbol, strategy, config) - - def _execute_backtest( - self, - symbol: str, - strategy: str, - data: pd.DataFrame, - parameters: dict[str, Any], - config: BacktestConfig, - ) -> BacktestResult: - """Execute the actual backtest logic.""" - try: - # Get strategy class - strategy_class = 
self._get_strategy_class(strategy) - if not strategy_class: - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics={}, - error=f"Strategy {strategy} not found", - ) - - # Initialize strategy - strategy_instance = strategy_class(**parameters) - - # Prepare data for backtesting library (requires uppercase OHLCV) - bt_data = self._prepare_data_for_backtesting_lib(data) - - self.logger.debug("Original data index tz: %s", data.index.tz) - self.logger.debug( - "Prepared bt_data index tz: %s", - bt_data.index.tz if bt_data is not None else "None", - ) - - if bt_data is None or bt_data.empty: - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics={}, - error="Data preparation failed", - ) - - # Create strategy adapter for backtesting library - StrategyAdapter = create_backtesting_strategy_adapter(strategy_instance) - - # Run backtest using the backtesting library - self.logger.debug( - "Creating Backtest with data shape: %s, index range: %s to %s", - bt_data.shape, - bt_data.index[0], - bt_data.index[-1], - ) - bt = Backtest( - bt_data, - StrategyAdapter, - cash=config.initial_capital, - commission=config.commission, - exclusive_orders=True, - finalize_trades=True, # Close open trades at end to count them - ) - - # Execute backtest - self.logger.debug("Running backtest...") - bt_results = bt.run() - self.logger.debug("Backtest completed successfully") - - # Convert backtesting library results to our format - result = self._convert_backtesting_results(bt_results, bt_data, config) - - # Extract metrics from backtesting library results - metrics = self._extract_metrics_from_bt_results(bt_results) - - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics=metrics, - equity_curve=( - result.get("equity_curve") if config.save_equity_curve else None - ), - trades=result.get("trades") if config.save_trades 
else None, - start_date=config.start_date, - end_date=config.end_date, - ) - - except Exception as e: - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics={}, - error=str(e), - ) - - def _execute_portfolio_backtest( - self, - data_dict: dict[str, pd.DataFrame], - strategy: str, - weights: dict[str, float], - config: BacktestConfig, - ) -> BacktestResult: - """Execute portfolio backtest.""" - try: - # Align all data to common date range - aligned_data = self._align_portfolio_data(data_dict) - - if aligned_data.empty: - return BacktestResult( - symbol="PORTFOLIO", - strategy=strategy, - parameters=weights, - config=config, - metrics={}, - error="No aligned data for portfolio", - ) - - # Calculate portfolio returns - portfolio_returns = self._calculate_portfolio_returns(aligned_data, weights) - - # Create portfolio equity curve - initial_capital = config.initial_capital - equity_curve = (1 + portfolio_returns).cumprod() * initial_capital - - # Calculate portfolio metrics - portfolio_data = { - "returns": portfolio_returns, - "equity_curve": equity_curve, - "weights": weights, - } - - metrics = self.result_analyzer.calculate_portfolio_metrics( - portfolio_data, initial_capital - ) - - return BacktestResult( - symbol="PORTFOLIO", - strategy=strategy, - parameters=weights, - config=config, - metrics=metrics, - equity_curve=( - equity_curve.to_frame("equity") - if config.save_equity_curve - else None - ), - ) - - except Exception as e: - return BacktestResult( - symbol="PORTFOLIO", - strategy=strategy, - parameters=weights, - config=config, - metrics={}, - error=str(e), - ) - - def _process_batch( - self, batch: list[tuple[str, str]], config: BacktestConfig - ) -> list[BacktestResult]: - """Process batch of symbol/strategy combinations.""" - with concurrent.futures.ProcessPoolExecutor( - max_workers=self.max_workers - ) as executor: - futures = { - executor.submit( - self._run_single_backtest_task, symbol, 
strategy, config - ): (symbol, strategy) - for symbol, strategy in batch - } - - results = [] - for future in concurrent.futures.as_completed(futures): - symbol, strategy = futures[future] - try: - result = future.result() - results.append(result) - except Exception as e: - self.logger.error( - "Batch backtest failed for %s/%s: %s", symbol, strategy, e - ) - self.stats["errors"] += 1 - results.append( - BacktestResult( - symbol=symbol, - strategy=strategy, - parameters={}, - config=config, - metrics={}, - error=str(e), - ) - ) - - return results - - def _run_single_backtest_task( - self, symbol: str, strategy: str, config: BacktestConfig - ) -> BacktestResult: - """Task function for multiprocessing.""" - # Create new instances for this process - data_manager = UnifiedDataManager() - cache_manager = UnifiedCacheManager() - - # Create temporary engine for this process - temp_engine = UnifiedBacktestEngine(data_manager, cache_manager, max_workers=1) - return temp_engine.run_backtest(symbol, strategy, config) - - def _prepare_data_with_indicators( - self, data: pd.DataFrame, strategy_instance - ) -> pd.DataFrame: - """Prepare data with technical indicators required by strategy.""" - prepared_data = data.copy() - - # Add basic indicators that most strategies need - prepared_data = self._add_basic_indicators(prepared_data) - - # Add strategy-specific indicators - if hasattr(strategy_instance, "add_indicators"): - prepared_data = strategy_instance.add_indicators(prepared_data) - - return prepared_data - - def _add_basic_indicators(self, data: pd.DataFrame) -> pd.DataFrame: - """Add basic technical indicators.""" - df = data.copy() - - # Simple moving averages - for period in [10, 20, 50]: - df[f"sma_{period}"] = df["close"].rolling(period).mean() - - # RSI - df["rsi_14"] = self._calculate_rsi(df["close"].values, 14) - - # MACD - macd_line, signal_line, histogram = self._calculate_macd(df["close"].values) - df["macd"] = macd_line - df["macd_signal"] = signal_line - 
df["macd_histogram"] = histogram - - # Bollinger Bands - sma_20 = df["close"].rolling(20).mean() - std_20 = df["close"].rolling(20).std() - df["bb_upper"] = sma_20 + (std_20 * 2) - df["bb_lower"] = sma_20 - (std_20 * 2) - df["bb_middle"] = sma_20 - - return df - - def _simulate_trading( - self, data: pd.DataFrame, strategy_instance, config: BacktestConfig - ) -> dict[str, Any]: - """Simulate trading based on strategy signals.""" - trades = [] - equity_curve = [] - - capital = config.initial_capital - position = 0 - position_size = 0 - - # Pre-generate all signals for the entire dataset - try: - strategy_data = self._transform_data_for_strategy(data) - all_signals = strategy_instance.generate_signals(strategy_data) - except Exception as e: - self.logger.debug( - "Strategy %s failed: %s", strategy_instance.__class__.__name__, e - ) - # If strategy fails, create zero signals - all_signals = pd.Series(0, index=data.index) - - for i, (timestamp, row) in enumerate(data.iterrows()): - # Get pre-generated signal for this timestamp - signal = all_signals.iloc[i] if i < len(all_signals) else 0 - - # Execute trades based on signal - if signal == 1 and position <= 0: # Buy signal - if position < 0: # Close short position - pnl = (position_size * row["close"] - position_size * position) * -1 - capital += pnl - trades.append( - { - "timestamp": timestamp, - "action": "cover", - "price": row["close"], - "size": abs(position_size), - "pnl": pnl, - } - ) - - # Open long position - use full capital minus commission for BuyAndHold - available_capital = capital / ( - 1 + config.commission - ) # Account for commission in calculation - position_size = available_capital / row["close"] - position = row["close"] - capital -= position_size * row["close"] + ( - position_size * row["close"] * config.commission - ) - - trades.append( - { - "timestamp": timestamp, - "action": "buy", - "price": row["close"], - "size": position_size, - "pnl": 0, - } - ) - - elif signal == -1 and position >= 0: # 
Sell signal - if position > 0: # Close long position - pnl = position_size * (row["close"] - position) - capital += pnl + (position_size * row["close"]) - trades.append( - { - "timestamp": timestamp, - "action": "sell", - "price": row["close"], - "size": position_size, - "pnl": pnl, - } - ) - position = 0 - position_size = 0 - - # Calculate current portfolio value - if position > 0: - portfolio_value = capital + (position_size * row["close"]) - elif position < 0: - portfolio_value = capital - (position_size * (row["close"] - position)) - else: - portfolio_value = capital - - equity_curve.append({"timestamp": timestamp, "equity": portfolio_value}) - - trades_df = pd.DataFrame(trades) if trades else pd.DataFrame() - - return { - "trades": trades_df, - "equity_curve": pd.DataFrame(equity_curve), - "final_capital": ( - equity_curve[-1]["equity"] if equity_curve else config.initial_capital - ), - } - - def _get_strategy_signal(self, strategy_instance, data: pd.DataFrame) -> int: - """Get trading signal from strategy.""" - if hasattr(strategy_instance, "generate_signals"): - try: - # Transform data to uppercase columns for strategy compatibility - strategy_data = self._transform_data_for_strategy(data) - # Use the correct method name (plural) - signals = strategy_instance.generate_signals(strategy_data) - if len(signals) > 0: - return signals.iloc[-1] # Return last signal - return 0 - except Exception as e: - # Log the actual error for debugging - self.logger.debug( - "Strategy %s failed: %s", strategy_instance.__class__.__name__, e - ) - # Strategy failed - return 0 (no signal) to generate zero metrics - return 0 - - # No generate_signals method - strategy is invalid, return 0 - return 0 - - def _transform_data_for_strategy(self, data: pd.DataFrame) -> pd.DataFrame: - """Transform data columns to uppercase format expected by external strategies.""" - if data is None or data.empty: - return data - - # Only select OHLCV columns that strategies expect - required_columns = 
["open", "high", "low", "close", "volume"] - - # Check if all required columns exist - missing_columns = [col for col in required_columns if col not in data.columns] - if missing_columns: - raise ValueError(f"Missing required columns: {missing_columns}") - - # Select only OHLCV columns - df = data[required_columns].copy() - - # Transform lowercase columns to uppercase for strategy compatibility - column_mapping = { - "open": "Open", - "high": "High", - "low": "Low", - "close": "Close", - "volume": "Volume", - } - - # Rename columns - df = df.rename(columns=column_mapping) - - return df - - def _align_portfolio_data(self, data_dict: dict[str, pd.DataFrame]) -> pd.DataFrame: - """Align multiple asset data to common date range.""" - if not data_dict: - return pd.DataFrame() - - # Find common date range - all_dates = None - for symbol, data in data_dict.items(): - all_dates = ( - set(data.index) - if all_dates is None - else all_dates.intersection(set(data.index)) - ) - - if not all_dates: - return pd.DataFrame() - - # Create aligned dataframe - common_dates = sorted(list(all_dates)) - aligned_data = pd.DataFrame(index=common_dates) - - for symbol, data in data_dict.items(): - aligned_data[f"{symbol}_close"] = data.loc[common_dates, "close"] - - return aligned_data.dropna() - - def _calculate_portfolio_returns( - self, aligned_data: pd.DataFrame, weights: dict[str, float] - ) -> pd.Series: - """Calculate portfolio returns.""" - returns = pd.Series(index=aligned_data.index, dtype=float) - - for i in range(1, len(aligned_data)): - portfolio_return = 0 - for symbol, weight in weights.items(): - col_name = f"{symbol}_close" - if col_name in aligned_data.columns: - asset_return = ( - aligned_data[col_name].iloc[i] - / aligned_data[col_name].iloc[i - 1] - ) - 1 - portfolio_return += weight * asset_return - - returns.iloc[i] = portfolio_return - - return returns.fillna(0) - - @staticmethod - # @jit(nopython=True) # Removed for compatibility - def _calculate_rsi(prices: 
np.ndarray, period: int = 14) -> np.ndarray: - """Fast RSI calculation using Numba.""" - deltas = np.diff(prices) - gains = np.where(deltas > 0, deltas, 0) - losses = np.where(deltas < 0, -deltas, 0) - - avg_gains = np.full_like(prices, np.nan) - avg_losses = np.full_like(prices, np.nan) - rsi = np.full_like(prices, np.nan) - - if len(gains) >= period: - avg_gains[period] = np.mean(gains[:period]) - avg_losses[period] = np.mean(losses[:period]) - - for i in range(period + 1, len(prices)): - avg_gains[i] = (avg_gains[i - 1] * (period - 1) + gains[i - 1]) / period - avg_losses[i] = ( - avg_losses[i - 1] * (period - 1) + losses[i - 1] - ) / period - - if avg_losses[i] == 0: - rsi[i] = 100 - else: - rs = avg_gains[i] / avg_losses[i] - rsi[i] = 100 - (100 / (1 + rs)) - - return rsi - - @staticmethod - # @jit(nopython=True) # Removed for compatibility - def _calculate_macd( - prices: np.ndarray, fast: int = 12, slow: int = 26, signal: int = 9 - ) -> tuple[np.ndarray, np.ndarray, np.ndarray]: - """Fast MACD calculation using Numba.""" - ema_fast = np.full_like(prices, np.nan) - ema_slow = np.full_like(prices, np.nan) - - # Calculate EMAs - alpha_fast = 2.0 / (fast + 1.0) - alpha_slow = 2.0 / (slow + 1.0) - - ema_fast[0] = prices[0] - ema_slow[0] = prices[0] - - for i in range(1, len(prices)): - ema_fast[i] = alpha_fast * prices[i] + (1 - alpha_fast) * ema_fast[i - 1] - ema_slow[i] = alpha_slow * prices[i] + (1 - alpha_slow) * ema_slow[i - 1] - - macd_line = ema_fast - ema_slow - - # Calculate signal line (EMA of MACD) - signal_line = np.full_like(prices, np.nan) - alpha_signal = 2.0 / (signal + 1.0) - - # Start signal line calculation after we have enough MACD data - signal_start = max(fast, slow) - if len(macd_line) > signal_start: - signal_line[signal_start] = macd_line[signal_start] - for i in range(signal_start + 1, len(prices)): - signal_line[i] = ( - alpha_signal * macd_line[i] - + (1 - alpha_signal) * signal_line[i - 1] - ) - - histogram = macd_line - signal_line - 
- return macd_line, signal_line, histogram - - def _calculate_batch_size(self, num_symbols: int, memory_limit_gb: float) -> int: - """Calculate optimal batch size based on memory constraints.""" - estimated_memory_per_symbol_mb = 50 - available_memory_mb = memory_limit_gb * 1024 * 0.8 - - max_batch_size = int(available_memory_mb / estimated_memory_per_symbol_mb) - return min(max_batch_size, num_symbols, 100) - - def _get_strategy_class(self, strategy_name: str) -> type | None: - """Get strategy class by name using StrategyFactory.""" - try: - from .strategy import StrategyFactory - - # Create an instance and get its class - strategy_instance = StrategyFactory.create_strategy(strategy_name, {}) - return strategy_instance.__class__ - except Exception as e: - self.logger.error("Failed to load strategy %s: %s", strategy_name, e) - return None - - def _get_default_parameters(self, strategy_name: str) -> dict[str, Any]: - """Get default parameters for a strategy.""" - default_params = { - "rsi": {"period": 14, "overbought": 70, "oversold": 30}, - "macd": {"fast": 12, "slow": 26, "signal": 9}, - "bollinger_bands": {"period": 20, "deviation": 2}, - "sma_crossover": {"fast_period": 10, "slow_period": 20}, - } - return default_params.get(strategy_name.lower(), {}) - - def _dict_to_result( - self, - cached_dict: dict, - symbol: str, - strategy: str, - parameters: dict, - config: BacktestConfig, - ) -> BacktestResult: - """Convert cached dictionary to BacktestResult object.""" - import pandas as pd - - # Handle trades data from cache - trades = cached_dict.get("trades") - if trades is not None and isinstance(trades, dict): - # Convert trades dict back to DataFrame - trades = pd.DataFrame(trades) - elif trades is not None and not isinstance(trades, pd.DataFrame): - trades = None - - # Handle equity_curve data from cache - equity_curve = cached_dict.get("equity_curve") - if equity_curve is not None and isinstance(equity_curve, dict): - # Convert equity_curve dict back to 
DataFrame - equity_curve = pd.DataFrame(equity_curve) - elif equity_curve is not None and not isinstance(equity_curve, pd.DataFrame): - equity_curve = None - - return BacktestResult( - symbol=symbol, - strategy=strategy, - parameters=parameters, - config=config, - metrics=cached_dict.get("metrics", {}), - trades=trades, - equity_curve=equity_curve, - start_date=cached_dict.get("start_date"), - end_date=cached_dict.get("end_date"), - duration_seconds=cached_dict.get("duration_seconds", 0), - data_points=cached_dict.get("data_points", 0), - error=cached_dict.get("error"), - ) - - def _log_stats(self): - """Log performance statistics.""" - self.logger.info("Batch backtest completed:") - self.logger.info(" Total backtests: %s", self.stats["backtests_run"]) - self.logger.info(" Cache hits: %s", self.stats["cache_hits"]) - self.logger.info(" Cache misses: %s", self.stats["cache_misses"]) - self.logger.info(" Errors: %s", self.stats["errors"]) - self.logger.info(" Total time: %.2fs", self.stats["total_time"]) - if self.stats["backtests_run"] > 0: - avg_time = self.stats["total_time"] / self.stats["backtests_run"] - self.logger.info(" Avg time per backtest: %.2fs", avg_time) - - def get_performance_stats(self) -> dict[str, Any]: - """Get engine performance statistics.""" - return self.stats.copy() - - def clear_cache(self, symbol: str | None = None, strategy: str | None = None): - """Clear cached results.""" - self.cache_manager.clear_cache(cache_type="backtest", symbol=symbol) - - def _prepare_data_for_backtesting_lib(self, data: pd.DataFrame) -> pd.DataFrame: - """Prepare data for the backtesting library (requires uppercase OHLCV columns).""" - try: - # Check if we have lowercase columns and convert them - if all( - col in data.columns - for col in ["open", "high", "low", "close", "volume"] - ): - bt_data = data.rename( - columns={ - "open": "Open", - "high": "High", - "low": "Low", - "close": "Close", - "volume": "Volume", - } - )[["Open", "High", "Low", "Close", 
"Volume"]].copy() - # Check if we already have uppercase columns - elif all( - col in data.columns - for col in ["Open", "High", "Low", "Close", "Volume"] - ): - bt_data = data[["Open", "High", "Low", "Close", "Volume"]].copy() - else: - self.logger.error("Missing required OHLCV columns in data") - return None - - # Ensure no NaN values - bt_data = bt_data.dropna() - - # Remove timezone info from index if present (backtesting library expects naive datetimes) - if bt_data.index.tz is not None: - bt_data.index = bt_data.index.tz_localize(None) - - return bt_data - - except Exception as e: - self.logger.error("Error preparing data for backtesting library: %s", e) - return None - - def _safe_float_convert(self, value) -> float: - """Safely convert value to float, handling Timestamp and other types.""" - if pd.isna(value): - return 0.0 - if isinstance(value, pd.Timestamp): - # Convert timestamp to number of days - return float( - value.value / (24 * 60 * 60 * 1000000000) - ) # nanoseconds to days - if isinstance(value, pd.Timedelta): - # Convert timedelta to number of days - return float(value.total_seconds() / (24 * 60 * 60)) - try: - return float(value) - except (ValueError, TypeError): - return 0.0 - - def _extract_metrics_from_bt_results(self, bt_results) -> dict[str, Any]: - """Extract metrics from backtesting library results.""" - try: - metrics = { - # Core performance metrics - "total_return": self._safe_float_convert( - bt_results.get("Return [%]", 0.0) - ), - "sharpe_ratio": self._safe_float_convert( - bt_results.get("Sharpe Ratio", 0.0) - ), - "sortino_ratio": self._safe_float_convert( - bt_results.get("Sortino Ratio", 0.0) - ), - "calmar_ratio": self._safe_float_convert( - bt_results.get("Calmar Ratio", 0.0) - ), - # Risk metrics - "max_drawdown": abs( - self._safe_float_convert(bt_results.get("Max. 
Drawdown [%]", 0.0)) - ), - "volatility": self._safe_float_convert( - bt_results.get("Volatility [%]", 0.0) - ), - # Trade metrics - "num_trades": int( - self._safe_float_convert(bt_results.get("# Trades", 0)) - ), - "win_rate": self._safe_float_convert( - bt_results.get("Win Rate [%]", 0.0) - ), - "profit_factor": self._safe_float_convert( - bt_results.get("Profit Factor", 1.0) - ), - # Additional metrics - "best_trade": self._safe_float_convert( - bt_results.get("Best Trade [%]", 0.0) - ), - "worst_trade": self._safe_float_convert( - bt_results.get("Worst Trade [%]", 0.0) - ), - "avg_trade": self._safe_float_convert( - bt_results.get("Avg. Trade [%]", 0.0) - ), - "avg_trade_duration": self._safe_float_convert( - bt_results.get("Avg. Trade Duration", 0.0) - ), - # Portfolio metrics - "start_value": self._safe_float_convert(bt_results.get("Start", 0.0)), - "end_value": self._safe_float_convert(bt_results.get("End", 0.0)), - "buy_hold_return": self._safe_float_convert( - bt_results.get("Buy & Hold Return [%]", 0.0) - ), - # Exposure - "exposure_time": self._safe_float_convert( - bt_results.get("Exposure Time [%]", 0.0) - ), - } - - return metrics - - except Exception as e: - self.logger.error( - "Error extracting metrics from backtesting results: %s", e - ) - return {} - - def _convert_backtesting_results( - self, bt_results, bt_data: pd.DataFrame, config: BacktestConfig - ) -> dict[str, Any]: - """Convert backtesting library results to our internal format.""" - try: - # Get trades from backtesting library - trades_df = None - equity_curve_df = None - - # Try to get trades if available - try: - if hasattr(bt_results, "_trades") and bt_results._trades is not None: - trades_df = bt_results._trades.copy() - - # Get equity curve from backtesting library - if ( - hasattr(bt_results, "_equity_curve") - and bt_results._equity_curve is not None - ): - equity_curve_df = bt_results._equity_curve.copy() - - except Exception as e: - self.logger.debug("Could not extract detailed 
trade data: %s", e) - - result = { - "trades": trades_df, - "equity_curve": equity_curve_df, - "final_value": self._safe_float_convert( - bt_results.get("End", config.initial_capital) - ), - "total_trades": int( - self._safe_float_convert(bt_results.get("# Trades", 0)) - ), - } - - return result - - except Exception as e: - self.logger.error("Error converting backtesting results: %s", e) - return { - "trades": None, - "equity_curve": None, - "final_value": config.initial_capital, - "total_trades": 0, - } diff --git a/src/core/cache_manager.py b/src/core/cache_manager.py index 99db0ca..2d6a271 100644 --- a/src/core/cache_manager.py +++ b/src/core/cache_manager.py @@ -9,6 +9,7 @@ import hashlib import json import logging +import os import pickle import sqlite3 import threading @@ -19,6 +20,12 @@ import pandas as pd +# Optional Redis for recent overlay cache +try: + import redis as _redis # type: ignore[import-not-found] +except Exception: # pragma: no cover - optional + _redis = None + @dataclass class CacheEntry: @@ -61,6 +68,22 @@ def __init__(self, cache_dir: str = "cache", max_size_gb: float = 10.0): self._init_database() self.logger = logging.getLogger(__name__) + # Optional Redis client for recent overlay layer + self.redis_client = None + try: + use_redis = os.getenv("USE_REDIS_RECENT", "false").lower() == "true" + redis_url = os.getenv("REDIS_URL", "") + if use_redis and _redis is not None and redis_url: + self.redis_client = _redis.from_url(redis_url, decode_responses=False) + # ping to verify + try: + self.redis_client.ping() + self.logger.info("Redis recent overlay enabled (%s)", redis_url) + except Exception: + self.redis_client = None + except Exception: + self.redis_client = None + def _init_database(self) -> None: """Initialize SQLite database for metadata.""" with sqlite3.connect(self.metadata_db) as conn: @@ -597,6 +620,39 @@ def _decompress_data(self, compressed_data: bytes) -> Any: # In production, consider using safer serialization formats return 
pickle.loads(decompressed) # nosec B301 + # -------- Optional Redis recent overlay helpers --------- + def _redis_recent_key(self, symbol: str, interval: str) -> str: + return f"data:recent:{symbol}:{interval}" + + def get_recent_overlay_from_redis( + self, symbol: str, interval: str + ) -> pd.DataFrame | None: + try: + if not self.redis_client: + return None + key = self._redis_recent_key(symbol, interval) + blob = self.redis_client.get(key) + if not blob: + return None + data = self._decompress_data(blob) + if isinstance(data, pd.DataFrame) and not data.empty: + return data + return None + except Exception: + return None + + def set_recent_overlay_to_redis( + self, symbol: str, interval: str, df: pd.DataFrame, ttl_hours: int = 24 + ) -> None: + try: + if not self.redis_client or df is None or df.empty: + return + key = self._redis_recent_key(symbol, interval) + blob = self._compress_data(df) + self.redis_client.setex(key, int(ttl_hours * 3600), blob) + except Exception: + return + def _hash_parameters(self, parameters: dict[str, Any]) -> str: """Generate hash for parameters.""" params_str = json.dumps(parameters, sort_keys=True) diff --git a/src/core/portfolio_manager.py b/src/core/collection_manager.py similarity index 100% rename from src/core/portfolio_manager.py rename to src/core/collection_manager.py diff --git a/src/core/data_manager.py b/src/core/data_manager.py index 6bd0db9..e1b8fe4 100644 --- a/src/core/data_manager.py +++ b/src/core/data_manager.py @@ -6,6 +6,7 @@ from __future__ import annotations import logging +import os import time import warnings from abc import ABC, abstractmethod @@ -217,7 +218,11 @@ def fetch_data( interval: str = "1d", **kwargs, ) -> Optional[pd.DataFrame]: - """Fetch data from Yahoo Finance.""" + """Fetch data from Yahoo Finance. + + Supports a 'period' kwarg (e.g. 'max', '1y') which will be preferred over + start/end if provided. This mirrors yfinance.Ticker.history semantics. 
+ """ import yfinance as yf self._rate_limit() @@ -226,11 +231,18 @@ def fetch_data( asset_type = kwargs.get("asset_type") transformed_symbol = self.transform_symbol(symbol, asset_type) + # Allow callers to pass 'period' to request the provider's period-based download + period = kwargs.get("period") or kwargs.get("period_mode") or None + try: ticker = yf.Ticker(transformed_symbol) - data = ticker.history(start=start_date, end=end_date, interval=interval) + if period: + # Use period-based download (yfinance handles interval constraints) + data = ticker.history(period=period, interval=interval) + else: + data = ticker.history(start=start_date, end=end_date, interval=interval) - if data.empty: + if data is None or data.empty: return None return self.standardize_data(data) @@ -247,32 +259,55 @@ def fetch_batch_data( interval: str = "1d", **kwargs, ) -> Dict[str, pd.DataFrame]: - """Fetch batch data from Yahoo Finance.""" + """Fetch batch data from Yahoo Finance. + + If a 'period' kwarg is provided it will be used instead of start/end + (matches yfinance.download semantics). 
+ """ import yfinance as yf self._rate_limit() + period = kwargs.get("period") or kwargs.get("period_mode") or None + try: - data = yf.download( - symbols, - start=start_date, - end=end_date, - interval=interval, - group_by="ticker", - progress=False, - ) + if period: + data = yf.download( + symbols, + period=period, + interval=interval, + group_by="ticker", + progress=False, + ) + else: + data = yf.download( + symbols, + start=start_date, + end=end_date, + interval=interval, + group_by="ticker", + progress=False, + ) result = {} if len(symbols) == 1: symbol = symbols[0] - if not data.empty: + if not getattr(data, "empty", False): result[symbol] = self.standardize_data(data) else: + # yfinance.download returns a DataFrame with a top-level column for each ticker for symbol in symbols: - if symbol in data.columns.levels[0]: - symbol_data = data[symbol] - if not symbol_data.empty: - result[symbol] = self.standardize_data(symbol_data) + try: + if symbol in data.columns.levels[0]: + symbol_data = data[symbol] + if not getattr(symbol_data, "empty", False): + result[symbol] = self.standardize_data(symbol_data) + except Exception as exc: + # some downloads return a flat DataFrame for single-column cases; ignore failures per-symbol + self.logger.debug( + "Batch fetch postprocess failed for %s: %s", symbol, exc + ) + continue return result @@ -689,7 +724,7 @@ def _initialize_sources(self) -> None: def add_source(self, source: DataSource): """Add a data source.""" self.sources[source.config.name] = source - self.logger.info("Added data source: %s", source.config.name) + self.logger.debug("Added data source: %s", source.config.name) def get_data( self, @@ -713,14 +748,108 @@ def get_data( asset_type: Asset type hint ('crypto', 'stocks', 'forex', etc.) 
**kwargs: Additional parameters for specific sources """ - # Check cache first - if use_cache: - cached_data = self.cache_manager.get_data( + # If a native provider period was requested (e.g., period='max'), skip cache reads to ensure + # we fetch the full available history from the source. We'll still write-through to cache below. + period_requested = kwargs.get("period") or kwargs.get("period_mode") + + # Check cache first (only when no explicit provider period was requested) + if use_cache and not period_requested: + # Legacy fast-path: return any single cached hit immediately (maintains test expectations) + legacy_cached = self.cache_manager.get_data( symbol, start_date, end_date, interval ) - if cached_data is not None: - self.logger.debug("Cache hit for %s", symbol) - return cached_data + if legacy_cached is not None: + self.logger.debug("Cache hit (legacy) for %s", symbol) + return legacy_cached + + # Split cache: attempt to merge a full snapshot with a recent overlay + # Try Redis overlay first if available + full_df = self.cache_manager.get_data( + symbol, start_date, end_date, interval, data_type="full" + ) + recent_df = None + try: + recent_df = self.cache_manager.get_recent_overlay_from_redis( + symbol, interval + ) + except Exception: + recent_df = None + if recent_df is None: + recent_df = self.cache_manager.get_data( + symbol, start_date, end_date, interval, data_type="recent" + ) + merged = None + if ( + full_df is not None + and not full_df.empty + and recent_df is not None + and not recent_df.empty + ): + try: + merged = ( + pd.concat([full_df, recent_df]) + .sort_index() + .loc[lambda df: ~df.index.duplicated(keep="last")] + ) + except Exception: + merged = full_df + elif full_df is not None and not full_df.empty: + merged = full_df + elif recent_df is not None and not recent_df.empty: + merged = recent_df + + if merged is not None and not merged.empty: + # If requested range extends beyond merged coverage, auto-extend by fetching missing windows 
+ try: + req_start = pd.to_datetime(start_date) + req_end = pd.to_datetime(end_date) + c_start = merged.index[0] + c_end = merged.index[-1] + need_before = req_start < c_start + need_after = req_end > c_end + except Exception: + need_before = need_after = False + + if need_before: + try: + df_b = self.get_data( + symbol, + start_date, + c_start.strftime("%Y-%m-%d"), + interval, + use_cache=False, + asset_type=asset_type, + period_mode=period_requested, + ) + if df_b is not None and not df_b.empty: + merged = ( + pd.concat([df_b, merged]) + .sort_index() + .loc[lambda df: ~df.index.duplicated(keep="last")] + ) + except Exception: + pass + if need_after: + try: + df_a = self.get_data( + symbol, + c_end.strftime("%Y-%m-%d"), + end_date, + interval, + use_cache=False, + asset_type=asset_type, + period_mode=period_requested, + ) + if df_a is not None and not df_a.empty: + merged = ( + pd.concat([merged, df_a]) + .sort_index() + .loc[lambda df: ~df.index.duplicated(keep="last")] + ) + except Exception: + pass + + return merged # Determine asset type if not provided if not asset_type: @@ -738,15 +867,48 @@ def get_data( symbol, start_date, end_date, interval, **kwargs ) if data is not None and not data.empty: - # Cache the data - if use_cache: + # Always write-through to cache on a fresh fetch. + # Use split-caching: store 'full' when provider period requested, else 'recent'. 
+ cache_kind = "full" if period_requested else "recent" + ttl_hours = 24 if cache_kind == "recent" else 24 * 30 + try: self.cache_manager.cache_data( - symbol, data, interval, source.config.name + symbol, + data, + interval, + source.config.name, + data_type=cache_kind, + ttl_hours=ttl_hours, + ) + except Exception as e: + self.logger.warning( + "Failed to cache data for %s from %s: %s", + symbol, + source.config.name, + e, ) self.logger.info( "Successfully fetched %s from %s", symbol, source.config.name ) + # Freshness check for daily bars + if interval == "1d": + try: + last_bar = data.index[-1].date() + from pandas.tseries.offsets import BDay + + expected = ( + pd.Timestamp(datetime.utcnow().date()) - BDay(1) + ).date() + if last_bar < expected: + self.logger.warning( + "Data for %s seems stale: last=%s expected>=%s", + symbol, + last_bar, + expected, + ) + except Exception: + pass return data except Exception as e: @@ -768,8 +930,8 @@ def get_batch_data( asset_type: str | None = None, **kwargs, ) -> Dict[str, pd.DataFrame]: - """Get data for multiple symbols with intelligent batching.""" - result = {} + """Get data for multiple symbols with intelligent batching and cache-first behavior.""" + result: Dict[str, pd.DataFrame] = {} # Group symbols by asset type for optimal source selection symbol_groups = self._group_symbols_by_type(symbols, asset_type) @@ -777,44 +939,102 @@ def get_batch_data( for group_type, group_symbols in symbol_groups.items(): sources = self._get_sources_for_asset_type(group_type) - # Try batch sources first + # If caching enabled, try to satisfy from cache first to avoid external requests + missing_symbols: List[str] = [] + if use_cache: + for symbol in list(group_symbols): + try: + full_df = self.cache_manager.get_data( + symbol, start_date, end_date, interval, data_type="full" + ) + recent_df = self.cache_manager.get_data( + symbol, start_date, end_date, interval, data_type="recent" + ) + merged = None + if ( + full_df is not None + and 
not full_df.empty + and recent_df is not None + and not recent_df.empty + ): + merged = ( + pd.concat([full_df, recent_df]) + .sort_index() + .loc[lambda df: ~df.index.duplicated(keep="last")] + ) + elif full_df is not None and not full_df.empty: + merged = full_df + elif recent_df is not None and not recent_df.empty: + merged = recent_df + + if merged is not None and not merged.empty: + result[symbol] = merged + # Track that we used cache for this symbol + continue + missing_symbols.append(symbol) + except Exception as e: + self.logger.warning("Cache lookup failed for %s: %s", symbol, e) + missing_symbols.append(symbol) + else: + missing_symbols = list(group_symbols) + + # Try batch-capable sources for missing symbols for source in sources: - if source.config.supports_batch and len(group_symbols) > 1: + if not missing_symbols: + break + + if source.config.supports_batch and len(missing_symbols) > 1: try: batch_data = source.fetch_batch_data( - group_symbols, start_date, end_date, interval, **kwargs + missing_symbols, start_date, end_date, interval, **kwargs ) + # Add fetched data to result and update cache + fetched_symbols = [] for symbol, data in batch_data.items(): if data is not None and not data.empty: result[symbol] = data + fetched_symbols.append(symbol) if use_cache: - self.cache_manager.cache_data( - symbol, data, interval, source.config.name - ) - group_symbols.remove(symbol) - - if not group_symbols: # All symbols fetched - break + try: + self.cache_manager.cache_data( + symbol, data, interval, source.config.name + ) + except Exception as e: + self.logger.warning( + "Failed to cache data for %s from %s: %s", + symbol, + source.config.name, + e, + ) + + # Remove fetched symbols from missing list + if fetched_symbols: + missing_symbols = [ + s for s in missing_symbols if s not in fetched_symbols + ] except Exception as e: self.logger.warning( "Batch fetch failed from %s: %s", source.config.name, e ) - # Fall back to individual requests for remaining 
symbols - for symbol in group_symbols: - individual_data = self.get_data( - symbol, - start_date, - end_date, - interval, - use_cache, - group_type, - **kwargs, - ) - if individual_data is not None: - result[symbol] = individual_data + # Fall back to individual requests for any remaining missing symbols + for symbol in missing_symbols: + try: + individual_data = self.get_data( + symbol, + start_date, + end_date, + interval, + use_cache, + group_type, + **kwargs, + ) + if individual_data is not None: + result[symbol] = individual_data + except Exception as e: + self.logger.warning("Individual fetch failed for %s: %s", symbol, e) return result @@ -879,25 +1099,87 @@ def _detect_asset_type(self, symbol: str) -> str: # Default to stocks return "stocks" + # Global override for source ordering per asset type (process-wide) + _global_source_order_overrides: dict[str, list[str]] = {} + + @classmethod + def set_source_order_override( + cls, asset_type: str, ordered_sources: list[str] + ) -> None: + cls._global_source_order_overrides[asset_type] = list(ordered_sources) + def _get_sources_for_asset_type(self, asset_type: str) -> List[DataSource]: - """Get appropriate sources for asset type, sorted by priority.""" + """Get appropriate sources for asset type, sorted by priority or override.""" suitable_sources = [] for source in self.sources.values(): if not source.config.asset_types or asset_type in source.config.asset_types: suitable_sources.append(source) - # Sort by priority (lower number = higher priority) - if asset_type == "crypto": - # Prioritize Bybit for crypto - suitable_sources.sort( - key=lambda x: (0 if x.config.name == "bybit" else x.config.priority) - ) + override = self._global_source_order_overrides.get(asset_type) + if override: + order_idx = {name: i for i, name in enumerate(override)} + suitable_sources.sort(key=lambda x: order_idx.get(x.config.name, 10_000)) else: - suitable_sources.sort(key=lambda x: x.config.priority) + if asset_type == "crypto": + 
suitable_sources.sort( + key=lambda x: (0 if x.config.name == "bybit" else x.config.priority) + ) + else: + suitable_sources.sort(key=lambda x: x.config.priority) return suitable_sources + def probe_and_set_order( + self, + asset_type: str, + symbols: list[str], + interval: str = "1d", + sample_size: int = 5, + ) -> list[str]: + """Probe sources for coverage and set a global ordering by longest history. + + Skips cache and uses provider period='max'. Returns ordered source names. + """ + sym_sample = symbols[: max(1, min(sample_size, len(symbols)))] + candidates = [s for s in self._get_sources_for_asset_type(asset_type)] + scores: list[tuple[str, int, pd.Timestamp | None]] = [] + + for src in candidates: + total_rows = 0 + earliest: pd.Timestamp | None = None + for s in sym_sample: + try: + df = src.fetch_data( + s, + start_date="1900-01-01", + end_date=datetime.utcnow().date().isoformat(), + interval=interval, + asset_type=asset_type, + period="max", + period_mode="max", + ) + if df is not None and not df.empty: + total_rows += len(df) + f = df.index[0] + earliest = f if earliest is None or f < earliest else earliest + except Exception as exc: + self.logger.debug( + "Probe error for %s via %s: %s", s, src.config.name, exc + ) + continue + scores.append((src.config.name, total_rows, earliest)) + + def _key(t: tuple[str, int, pd.Timestamp | None]): + name, rows, first = t + first_val = first.value if hasattr(first, "value") else 2**63 - 1 + return (-rows, first_val) + + ordered = [name for name, *_ in sorted(scores, key=_key)] + if ordered: + self.set_source_order_override(asset_type, ordered) + return ordered + def _group_symbols_by_type( self, symbols: List[str], default_type: Optional[str] = None ) -> Dict[str, List[str]]: @@ -1282,5 +1564,4 @@ def get_available_symbols(self, asset_type: str | None = None) -> list[str]: return [] -# Import required modules -import os +# (end of module) diff --git a/src/core/direct_backtest.py b/src/core/direct_backtest.py index 
26c38fe..02d69e8 100644 --- a/src/core/direct_backtest.py +++ b/src/core/direct_backtest.py @@ -1,12 +1,17 @@ """ Direct Backtesting Library Integration Direct backtesting using the backtesting library. + +This file was extended to support optional persistence of backtest results into the +project database via the lightweight unified_models helper (src.database.unified_models). +Pass an optional persistence_context (dict) to run_direct_backtest / run_strategy_comparison +to enable DB writes. Persistence is best-effort: guarded imports and safe upsert logic. """ from __future__ import annotations import logging -from typing import Any +from typing import Any, Dict, List, Optional import pandas as pd from backtesting import Backtest @@ -16,6 +21,546 @@ from .strategy import StrategyFactory +# Local utilities used when persisting +def _persist_result_to_db( + result: Dict[str, Any], persistence_context: Dict[str, Any] +) -> None: + """ + Persist a single backtest result into the DB using src.database.unified_models. + This function is best-effort and will not raise on failures (only logs). + persistence_context must include at least: + - run_id (str) + - target_metric (str) optional + - session_factory or rely on unified_models.Session + Added debug logging to help diagnose missing metrics persistence. 
+ """ + logger = logging.getLogger(__name__) + logger.debug( + "Called _persist_result_to_db for symbol=%s strategy=%s", + result.get("symbol"), + result.get("strategy"), + ) + logger.debug( + "Persistence context keys: %s", + list((persistence_context or {}).keys()) + if persistence_context is not None + else None, + ) + + try: + from src.database import unified_models # type: ignore[import-not-found] + from src.utils.trades_parser import ( + parse_trades_from_string, # type: ignore[import-not-found] + ) + except Exception: + logger.debug( + "Persistence not available (unified_models or trades parser missing)" + ) + return + + try: + sess = unified_models.Session() + except Exception as e: + logger.exception("Failed to create unified_models.Session(): %s", e) + return + try: + run_id = persistence_context.get("run_id") + # If run_id is missing or falsy, avoid attempting DB writes which will violate NOT NULL constraints. + if not run_id: + logging.getLogger(__name__).debug( + "Persistence context provided but run_id is missing; skipping DB persistence for %s", + result.get("symbol"), + ) + return + + # Check for existing BacktestResult (idempotency) + existing = ( + sess.query(unified_models.BacktestResult) + .filter( + unified_models.BacktestResult.run_id == run_id, + unified_models.BacktestResult.symbol == result.get("symbol"), + unified_models.BacktestResult.strategy == result.get("strategy"), + unified_models.BacktestResult.interval == result.get("timeframe"), + ) + .one_or_none() + ) + + # Prepare payload + metrics = result.get("metrics") or {} + # Try to convert native backtesting stats to a plain dict unconditionally + raw_stats = result.get("bt_results") + engine_ctx = None + try: + if raw_stats is not None: + engine_ctx = ( + raw_stats if isinstance(raw_stats, dict) else dict(raw_stats) + ) + except Exception: + engine_ctx = None + + # If metrics were not provided, derive a few canonical ones from engine_ctx + # so downstream ranking (target_metric) has 
values to work with. + if not metrics and engine_ctx and isinstance(engine_ctx, dict): + try: + + def _as_float(v): + try: + return float(v) + except Exception: + return None + + # Backtesting.py common keys + sortino = engine_ctx.get("Sortino Ratio") + calmar = engine_ctx.get("Calmar Ratio") + sharpe = engine_ctx.get("Sharpe Ratio") + total_ret = engine_ctx.get("Return [%]") + max_dd = engine_ctx.get("Max. Drawdown [%]") or engine_ctx.get( + "Max Drawdown [%]" + ) + num_trades = engine_ctx.get("# Trades") + + derived = {} + if sortino is not None: + derived["sortino_ratio"] = _as_float(sortino) + derived["Sortino_Ratio"] = derived["sortino_ratio"] + if calmar is not None: + derived["calmar_ratio"] = _as_float(calmar) + derived["Calmar_Ratio"] = derived["calmar_ratio"] + if sharpe is not None: + derived["sharpe_ratio"] = _as_float(sharpe) + derived["Sharpe_Ratio"] = derived["sharpe_ratio"] + if total_ret is not None: + derived["total_return"] = _as_float(total_ret) + derived["Total_Return"] = derived["total_return"] + if max_dd is not None: + derived["max_drawdown"] = _as_float(max_dd) + derived["Max_Drawdown"] = derived["max_drawdown"] + if num_trades is not None: + # leave as float to keep consistent handling downstream + derived["num_trades"] = _as_float(num_trades) + + metrics = derived + except Exception: + # Best-effort only; leave metrics as-is if derivation fails + pass + + # Sanitize JSON-like payloads: replace NaN/Inf with None and convert numpy/pandas objects. 
+ def _sanitize_jsonable(obj): + try: + import math + except Exception: + math = None + try: + import pandas as _pd # type: ignore[import-not-found] + except Exception: + _pd = None + try: + import numpy as _np # type: ignore[import-not-found] + except Exception: + _np = None + + # Pandas DataFrame/Series first: convert then recurse to sanitize nested values + try: + if _pd is not None and isinstance(obj, _pd.DataFrame): + recs = obj.to_dict(orient="records") + return _sanitize_jsonable(recs) + if _pd is not None and isinstance(obj, _pd.Series): + return _sanitize_jsonable(obj.to_dict()) + except Exception: + pass + + # Primitive safe types + if obj is None: + return None + if isinstance(obj, (str, bool, int)): + return obj + # Floats: guard against NaN / Inf which are invalid in JSONB + if isinstance(obj, float): + try: + if math is not None and (math.isnan(obj) or math.isinf(obj)): + return None + except Exception: + return None + return obj + # Numpy scalars + try: + if _np is not None and isinstance(obj, _np.generic): + return _sanitize_jsonable(obj.item()) + except Exception: + pass + # Dicts and lists: recurse + if isinstance(obj, dict): + out = {} + for k, v in obj.items(): + try: + out[k] = _sanitize_jsonable(v) + except Exception: + out[k] = None + return out + if isinstance(obj, (list, tuple)): + return [_sanitize_jsonable(v) for v in obj] + # Fallback: try to coerce to string safely + try: + return str(obj) + except Exception: + return None + + # Apply sanitization before persisting into JSONB columns + try: + metrics = _sanitize_jsonable(metrics) + except Exception: + metrics = {} + try: + engine_ctx = _sanitize_jsonable(engine_ctx) + except Exception: + engine_ctx = None + + trades_raw = None + trades_obj = result.get("trades") + if trades_obj is not None: + try: + if isinstance(trades_obj, pd.DataFrame): + trades_raw = trades_obj.to_csv(index=False) + else: + # If it's a list/dict or other, try json + import json as _json # local import + + trades_raw 
= _json.dumps(trades_obj) + except Exception: + trades_raw = str(trades_obj) + + # Attach equity curve into engine_ctx for reporting if available + try: + eq = result.get("equity_curve") + if eq is not None: + if engine_ctx is None: + engine_ctx = {} + engine_ctx["_equity_curve"] = _sanitize_jsonable(eq) + # Re-sanitize engine_ctx to ensure no NaN/Inf slipped in + engine_ctx = _sanitize_jsonable(engine_ctx) + except Exception: + pass + + start_at = None + end_at = None + # Try to infer start/end from engine context or trades/data if present + if "start_date" in result and "end_date" in result: + try: + import dateutil.parser as _parser # type: ignore[import-not-found] + + start_at = _parser.parse(result["start_date"]) + end_at = _parser.parse(result["end_date"]) + except Exception: + start_at = None + end_at = None + + if existing: + # Update existing row (idempotent upsert behavior) + existing.metrics = metrics + existing.engine_ctx = engine_ctx + existing.trades_raw = trades_raw + existing.error = result.get("error") + if start_at is not None: + existing.start_at_utc = start_at + if end_at is not None: + existing.end_at_utc = end_at + sess.add(existing) + sess.flush() + result_id = existing.result_id + else: + br = unified_models.BacktestResult( + run_id=run_id, + symbol=result.get("symbol"), + strategy=result.get("strategy"), + interval=result.get("timeframe"), + start_at_utc=start_at, + end_at_utc=end_at, + rank_in_symbol=None, + metrics=metrics, + engine_ctx=engine_ctx, + trades_raw=trades_raw, + error=result.get("error"), + ) + sess.add(br) + sess.flush() + result_id = br.result_id + + # Persist trades normalized rows if possible + if trades_raw: + try: + # Ensure new optional columns exist (best-effort, safe if already present) + try: + unified_models.create_tables() + except Exception: + pass + parsed_trades = parse_trades_from_string(trades_raw) + # Cleanup existing trades for this result (to keep idempotent) + sess.query(unified_models.Trade).filter( + 
unified_models.Trade.result_id == result_id + ).delete() + for t in parsed_trades: + # Try to parse entry/exit timestamps if available + def _parse_dt(val): + try: + if val is None: + return None + import dateutil.parser as _parser # type: ignore[import-not-found] + + return _parser.parse(str(val)) + except Exception: + return None + + tr = unified_models.Trade( + result_id=result_id, + trade_index=int(t.get("trade_index", 0)), + entry_time=_parse_dt( + t.get("entry_time") + or t.get("EntryTime") + or t.get("entry time") + ), + exit_time=_parse_dt( + t.get("exit_time") + or t.get("ExitTime") + or t.get("exit time") + ), + size=str(t.get("size")) if t.get("size") is not None else None, + entry_bar=int(t.get("entry_bar")) + if t.get("entry_bar") is not None + else None, + exit_bar=int(t.get("exit_bar")) + if t.get("exit_bar") is not None + else None, + entry_price=str(t.get("entry_price")) + if t.get("entry_price") is not None + else None, + exit_price=str(t.get("exit_price")) + if t.get("exit_price") is not None + else None, + pnl=str(t.get("pnl")) if t.get("pnl") is not None else None, + duration=str(t.get("duration")) + if t.get("duration") is not None + else None, + tag=str(t.get("tag")) if t.get("tag") is not None else None, + entry_signals=str(t.get("entry_signals")) + if t.get("entry_signals") is not None + else None, + exit_signals=str(t.get("exit_signals")) + if t.get("exit_signals") is not None + else None, + ) + sess.add(tr) + sess.flush() + except Exception: + logging.getLogger(__name__).exception( + "Failed to persist trades for result %s", result.get("symbol") + ) + + sess.commit() + except Exception: + sess.rollback() + logging.getLogger(__name__).exception( + "Failed to persist backtest result for %s", result.get("symbol") + ) + finally: + sess.close() + + +def finalize_persistence_for_run(run_id: str, target_metric: Optional[str]) -> None: + """ + Finalize DB persistence for a run: compute per-symbol ranks by target metric, + upsert SymbolAggregate 
summaries and canonical BestStrategy rows. + + This is a best-effort helper and will log/continue on failures. + """ + if not run_id or not target_metric: + logging.getLogger(__name__).debug( + "finalize_persistence_for_run skipped (missing run_id or target_metric)" + ) + return + + def _is_higher_better(metric_name: str) -> bool: + mn = (metric_name or "").lower() + if "drawdown" in mn or "max_drawdown" in mn or "mdd" in mn: + return False + return True + + sess = None + try: + from src.database import unified_models # type: ignore[import-not-found] + + sess = unified_models.Session() + + # Get distinct symbols for run + symbols = ( + sess.query(unified_models.BacktestResult.symbol) + .filter(unified_models.BacktestResult.run_id == run_id) + .distinct() + .all() + ) + symbols = [s[0] for s in symbols] + + for symbol in symbols: + rows = ( + sess.query(unified_models.BacktestResult) + .filter( + unified_models.BacktestResult.run_id == run_id, + unified_models.BacktestResult.symbol == symbol, + ) + .all() + ) + + entries = [] + higher_better = _is_higher_better(target_metric) + for r in rows: + mval = None + try: + if r.metrics and isinstance(r.metrics, dict): + raw = r.metrics.get(target_metric) + mval = None if raw is None else float(raw) + except Exception as exc: + logging.getLogger(__name__).debug( + "Failed to parse metric %s: %s", target_metric, exc + ) + sort_key = ( + (float("-inf") if higher_better else float("inf")) + if mval is None + else mval + ) + entries.append((sort_key, mval is None, r)) + + entries.sort(key=lambda x: x[0], reverse=higher_better) + + for idx, (_sk, _is_null, row) in enumerate(entries): + row.rank_in_symbol = idx + 1 + sess.add(row) + + if entries: + best_row = entries[0][2] + topn = [] + for e in entries[:3]: + r = e[2] + topn.append( + { + "strategy": r.strategy, + "interval": r.interval, + "rank": r.rank_in_symbol, + "metric": None + if r.metrics is None + else r.metrics.get(target_metric), + } + ) + # Upsert SymbolAggregate + 
existing_agg = ( + sess.query(unified_models.SymbolAggregate) + .filter( + unified_models.SymbolAggregate.run_id == run_id, + unified_models.SymbolAggregate.symbol == symbol, + unified_models.SymbolAggregate.best_by == target_metric, + ) + .one_or_none() + ) + summary = {"top": topn} + if existing_agg: + existing_agg.best_result = best_row.result_id + existing_agg.summary = summary + sess.add(existing_agg) + else: + agg = unified_models.SymbolAggregate( + run_id=run_id, + symbol=symbol, + best_by=target_metric, + best_result=best_row.result_id, + summary=summary, + ) + sess.add(agg) + + # Upsert BestStrategy + try: + bs_existing = ( + sess.query(unified_models.BestStrategy) + .filter( + unified_models.BestStrategy.symbol == symbol, + unified_models.BestStrategy.timeframe == best_row.interval, + ) + .one_or_none() + ) + + def _num(mdict, key): + try: + if mdict and isinstance(mdict, dict): + v = mdict.get(key) + return float(v) if v is not None else None + except Exception: + return None + return None + + sortino_val = _num(best_row.metrics, "sortino_ratio") or _num( + best_row.metrics, "Sortino_Ratio" + ) + calmar_val = _num(best_row.metrics, "calmar_ratio") or _num( + best_row.metrics, "Calmar_Ratio" + ) + sharpe_val = _num(best_row.metrics, "sharpe_ratio") or _num( + best_row.metrics, "Sharpe_Ratio" + ) + total_return_val = _num(best_row.metrics, "total_return") or _num( + best_row.metrics, "Total_Return" + ) + max_dd_val = _num(best_row.metrics, "max_drawdown") or _num( + best_row.metrics, "Max_Drawdown" + ) + + from datetime import datetime as _dt + + if bs_existing: + bs_existing.strategy = best_row.strategy + bs_existing.sortino_ratio = sortino_val + bs_existing.calmar_ratio = calmar_val + bs_existing.sharpe_ratio = sharpe_val + bs_existing.total_return = total_return_val + bs_existing.max_drawdown = max_dd_val + bs_existing.backtest_result_id = getattr( + best_row, "result_id", None + ) + bs_existing.updated_at = _dt.utcnow() + sess.add(bs_existing) + else: 
+ bs = unified_models.BestStrategy( + symbol=symbol, + timeframe=best_row.interval, + strategy=best_row.strategy, + sortino_ratio=sortino_val, + calmar_ratio=calmar_val, + sharpe_ratio=sharpe_val, + total_return=total_return_val, + max_drawdown=max_dd_val, + backtest_result_id=getattr(best_row, "result_id", None), + updated_at=_dt.utcnow(), + ) + sess.add(bs) + except Exception: + logging.getLogger(__name__).exception( + "Failed to upsert BestStrategy for %s", symbol + ) + + sess.commit() + except Exception: + try: + if sess: + sess.rollback() + except Exception: + pass + logging.getLogger(__name__).exception( + "Failed to finalize ranks/aggregates for run %s", run_id + ) + finally: + try: + if sess: + sess.close() + except Exception: + pass + + def run_direct_backtest( symbol: str, strategy_name: str, @@ -24,20 +569,35 @@ def run_direct_backtest( timeframe: str = "1d", initial_capital: float = 10000.0, commission: float = 0.001, -) -> dict[str, Any]: + period: Optional[str] = None, + use_cache: bool = True, + persistence_context: Optional[Dict[str, Any]] = None, +) -> Dict[str, Any]: """ Run backtest using backtesting library directly. Returns ground truth results without wrapper complexity. + + If persistence_context is provided (dict), the function will attempt to persist + the result into the DB via src.database.unified_models. """ logger = logging.getLogger(__name__) try: # Get data data_manager = UnifiedDataManager() - data = data_manager.get_data(symbol, start_date, end_date, timeframe) + # If 'period' is provided, data sources like Yahoo will prefer it over start/end. 
+ data = data_manager.get_data( + symbol, + start_date, + end_date, + timeframe, + use_cache=use_cache, + period=period, + period_mode=period, + ) if data is None or data.empty: - return { + res = { "symbol": symbol, "strategy": strategy_name, "timeframe": timeframe, @@ -46,6 +606,10 @@ def run_direct_backtest( "trades": None, "backtest_object": None, } + # Attempt to persist even no-data case + if persistence_context: + _persist_result_to_db(res, persistence_context) + return res # Prepare data for backtesting library bt_data = data.rename( @@ -78,71 +642,51 @@ def run_direct_backtest( finalize_trades=True, # Ensure all trades are captured ) + # Run and keep native stats object from backtesting library result = bt.run() - # Extract real metrics directly from backtesting library - metrics = { - "total_return": float(result.get("Return [%]", 0.0)), - "sharpe_ratio": ( - float(result.get("Sharpe Ratio", 0.0)) - if not pd.isna(result.get("Sharpe Ratio", 0.0)) - else 0.0 - ), - "sortino_ratio": ( - float(result.get("Sortino Ratio", 0.0)) - if not pd.isna(result.get("Sortino Ratio", 0.0)) - else 0.0 - ), - "calmar_ratio": ( - float(result.get("Calmar Ratio", 0.0)) - if not pd.isna(result.get("Calmar Ratio", 0.0)) - else 0.0 - ), - "max_drawdown": abs(float(result.get("Max. 
Drawdown [%]", 0.0))), - "volatility": ( - float(result.get("Volatility [%]", 0.0)) - if not pd.isna(result.get("Volatility [%]", 0.0)) - else 0.0 - ), - "num_trades": int(result.get("# Trades", 0)), - "win_rate": ( - float(result.get("Win Rate [%]", 0.0)) - if not pd.isna(result.get("Win Rate [%]", 0.0)) - else 0.0 - ), - "profit_factor": ( - float(result.get("Profit Factor", 1.0)) - if not pd.isna(result.get("Profit Factor", 1.0)) - else 1.0 - ), - "exposure_time": float(result.get("Exposure Time [%]", 0.0)), - "start_value": float(initial_capital), # Use known initial capital - "end_value": float(result.get("Equity Final [$]", initial_capital)), - } - # Extract trades if available trades = None if hasattr(result, "_trades") and not result._trades.empty: trades = result._trades.copy() - elif hasattr(bt, "_data") and hasattr(bt, "_results"): - # Extract from backtest object if available - if hasattr(result, "_trades"): - trades = result._trades - return { + # Extract equity curve if available + equity_curve = None + try: + if hasattr(result, "_equity_curve") and result._equity_curve is not None: + equity_curve = result._equity_curve.copy() + except Exception: + equity_curve = None + + ret = { "symbol": symbol, "strategy": strategy_name, "timeframe": timeframe, "error": None, - "metrics": metrics, + # Do not extract custom metrics; return native stats instead + "metrics": None, "trades": trades, + "equity_curve": equity_curve, "backtest_object": bt, # Include for plotting - "bt_results": result, # Include full results + "bt_results": result, # Native stats/series from backtesting library + "start_date": start_date, + "end_date": end_date, } + # Persist if requested + if persistence_context: + try: + _persist_result_to_db(ret, persistence_context) + except Exception: + logger.exception( + "Failed to persist result for %s/%s", symbol, strategy_name + ) + + return ret + except Exception as e: logger.error("Direct backtest failed for %s/%s: %s", symbol, strategy_name, 
e) - return { + res = { "symbol": symbol, "strategy": strategy_name, "timeframe": timeframe, @@ -151,19 +695,25 @@ def run_direct_backtest( "trades": None, "backtest_object": None, } + if persistence_context: + _persist_result_to_db(res, persistence_context) + return res def run_strategy_comparison( symbol: str, - strategies: list[str], + strategies: List[str], start_date: str, end_date: str, timeframe: str = "1d", initial_capital: float = 10000.0, -) -> dict[str, Any]: + persistence_context: Optional[Dict[str, Any]] = None, +) -> Dict[str, Any]: """ Compare multiple strategies for a symbol using backtesting library. Returns complete analysis with rankings and plot data. + + If persistence_context is provided, each individual strategy result will be persisted. """ logger = logging.getLogger(__name__) logger.info( @@ -172,31 +722,59 @@ def run_strategy_comparison( results = [] best_result = None - best_sortino = -999 + best_sortino = -999.0 for strategy_name in strategies: result = run_direct_backtest( - symbol, strategy_name, start_date, end_date, timeframe, initial_capital + symbol, + strategy_name, + start_date, + end_date, + timeframe, + initial_capital, + persistence_context=persistence_context, ) results.append(result) - # Track best strategy for plotting - if ( - not result["error"] - and result["metrics"].get("sortino_ratio", 0) > best_sortino - ): - best_sortino = result["metrics"]["sortino_ratio"] - best_result = result + # Track best strategy by Sortino: prefer native bt_results, fallback to metrics['sortino_ratio'] + if not result["error"]: + try: + native = result.get("bt_results") or {} + sortino = native.get("Sortino Ratio", None) + if sortino is None: + # Fallback to normalized metrics key when native field absent + sortino = (result.get("metrics") or {}).get("sortino_ratio") + sortino_val = float("nan") if sortino is None else float(sortino) + except Exception as exc: + logging.getLogger(__name__).debug("Failed to parse Sortino: %s", exc) + 
sortino_val = float("nan") - # Sort by Sortino ratio - results.sort(key=lambda x: x["metrics"].get("sortino_ratio", 0), reverse=True) + # Treat NaN as very poor + if sortino_val == sortino_val and sortino_val > best_sortino: + best_sortino = sortino_val + best_result = result + + # Sort by native Sortino Ratio + def _sort_key(res: Dict[str, Any]) -> float: + try: + native = res.get("bt_results") or {} + v = native.get("Sortino Ratio", None) + if v is None: + v = (res.get("metrics") or {}).get("sortino_ratio") + val = float(v) if v is not None else float("nan") + # push NaN to the end by returning -inf when NaN + return val if val == val else float("-inf") + except Exception: + return float("-inf") + + results.sort(key=_sort_key, reverse=True) # Add rankings for i, result in enumerate(results): result["rank"] = i + 1 - return { + out = { "symbol": symbol, "timeframe": timeframe, "results": results, @@ -206,8 +784,97 @@ def run_strategy_comparison( [ r for r in results - if not r["error"] and r["metrics"].get("num_trades", 0) > 0 + if not r["error"] + and (lambda _n: (float(_n) if _n is not None else 0.0) > 0.0)( + (r.get("bt_results") or {}).get("# Trades", None) + ) ] ), "date_range": f"{start_date} to {end_date}", } + + # If persistence context contains a run_id and target_metric, finalize ranking/aggregates + try: + run_id = persistence_context.get("run_id") if persistence_context else None + target_metric = ( + persistence_context.get("target_metric") if persistence_context else None + ) + finalize_persistence_for_run(run_id, target_metric) + except Exception: + logging.getLogger(__name__).debug( + "No persistence_context provided or failed to finalize ranks/aggregates" + ) + + # Safety net: directly upsert BestStrategy from in-memory best_result when possible. + # This covers environments where DB state wasn't fully populated yet by finalize. 
+ try: + if persistence_context and best_result and best_result.get("strategy"): + from src.database import unified_models # type: ignore[import-not-found] + + sess = unified_models.Session() + try: + bs_existing = ( + sess.query(unified_models.BestStrategy) + .filter( + unified_models.BestStrategy.symbol == symbol, + unified_models.BestStrategy.timeframe == timeframe, + ) + .one_or_none() + ) + + m = best_result.get("metrics") or {} + + def _num(d, k): + try: + if d and isinstance(d, dict): + v = d.get(k) + return float(v) if v is not None else None + except Exception: + return None + return None + + sortino_val = _num(m, "sortino_ratio") or _num(m, "Sortino_Ratio") + calmar_val = _num(m, "calmar_ratio") or _num(m, "Calmar_Ratio") + sharpe_val = _num(m, "sharpe_ratio") or _num(m, "Sharpe_Ratio") + total_return_val = _num(m, "total_return") or _num(m, "Total_Return") + max_dd_val = _num(m, "max_drawdown") or _num(m, "Max_Drawdown") + + from datetime import datetime as _dt + + if bs_existing: + bs_existing.strategy = best_result.get("strategy") + bs_existing.sortino_ratio = sortino_val + bs_existing.calmar_ratio = calmar_val + bs_existing.sharpe_ratio = sharpe_val + bs_existing.total_return = total_return_val + bs_existing.max_drawdown = max_dd_val + bs_existing.updated_at = _dt.utcnow() + sess.add(bs_existing) + else: + bs = unified_models.BestStrategy( + symbol=symbol, + timeframe=timeframe, + strategy=best_result.get("strategy"), + sortino_ratio=sortino_val, + calmar_ratio=calmar_val, + sharpe_ratio=sharpe_val, + total_return=total_return_val, + max_drawdown=max_dd_val, + updated_at=_dt.utcnow(), + ) + sess.add(bs) + sess.commit() + except Exception: + try: + sess.rollback() + except Exception: + pass + finally: + try: + sess.close() + except Exception: + pass + except Exception: + logging.getLogger(__name__).debug("BestStrategy safety upsert skipped") + + return out diff --git a/src/core/external_strategy_loader.py b/src/core/external_strategy_loader.py index 
a1aef5f..c26147b 100644 --- a/src/core/external_strategy_loader.py +++ b/src/core/external_strategy_loader.py @@ -43,23 +43,43 @@ def __init__(self, strategies_path: str | None = None): self._discover_strategies() def _discover_strategies(self) -> None: - """Discover available strategy modules""" - if not self.strategies_path.exists(): - logger.warning("Strategies path does not exist: %s", self.strategies_path) - return - - # Look for Python strategy files - for strategy_file in self.strategies_path.glob("*.py"): - if ( - not strategy_file.name.startswith("_") - and strategy_file.name != "README.py" - ): + """Discover available strategy modules. + + Prefer importing strategies from the 'algorithms/python' subdirectory of the + provided strategies_path. If that subdirectory is missing but the provided + strategies_path itself contains standalone Python strategy files (common when + mounting ./quant-strategies/algorithms/python directly to the container root), + fall back to loading .py files from the root of strategies_path. + This keeps imports safe while allowing flexible mount layouts. 
+ """ + try: + alg_py = Path(self.strategies_path) / "algorithms" / "python" + search_dir = None + + # Primary: explicit algorithms/python directory + if alg_py.exists() and alg_py.is_dir(): + search_dir = alg_py + else: + # Fallback: if the strategies_path itself directly contains .py files, + # use that directory (handles mounts like ./quant-strategies/algorithms/python:/app/external_strategies) + sp = Path(self.strategies_path) + if sp.exists() and any(sp.glob("*.py")): + search_dir = sp + + if search_dir is None: + logger.warning( + "algorithms/python directory not found under strategies_path: %s", + alg_py, + ) + return + + for strategy_file in search_dir.glob("*.py"): + if strategy_file.name.startswith("_"): + continue self._load_strategy_file(strategy_file) - # Also look for directory-based strategies with adapters - for strategy_dir in self.strategies_path.iterdir(): - if strategy_dir.is_dir() and not strategy_dir.name.startswith("."): - self._load_strategy_dir(strategy_dir) + except Exception as e: + logger.error("Error discovering strategies in algorithms/python: %s", e) def _load_strategy_file(self, strategy_file: Path) -> None: """ @@ -181,6 +201,43 @@ def list_strategies(self) -> list[str]: """Get list of available strategy names""" return list(self.loaded_strategies.keys()) + def list_strategy_candidates(self) -> list[str]: + """ + Non-import-based discovery: list candidate strategy names (file stems and dirs) + without attempting to import them. This is safe in minimal environments and + useful for CLI discovery (--strategies=all) when imports would fail due to + missing optional dependencies. 
+ """ + candidates: set[str] = set() + try: + if not self.strategies_path or not Path(self.strategies_path).exists(): + return [] + sp = Path(self.strategies_path) + # Python files in root + for f in sp.glob("*.py"): + if not f.name.startswith("_") and f.name != "README.py": + candidates.add(f.stem) + # Files under algorithms/python + alg_py = sp / "algorithms" / "python" + if alg_py.exists(): + for f in alg_py.glob("*.py"): + if not f.name.startswith("_"): + candidates.add(f.stem) + # Files under algorithms/original (some are .py) + alg_orig = sp / "algorithms" / "original" + if alg_orig.exists(): + for f in alg_orig.glob("*.py"): + if not f.name.startswith("_"): + candidates.add(f.stem) + # Directory-based strategies + for d in sp.iterdir(): + if d.is_dir() and not d.name.startswith("."): + candidates.add(d.name.replace("-", "_")) + except Exception: + # Best-effort: return whatever we have collected so far + pass + return sorted(candidates) + def get_strategy_info(self, strategy_name: str) -> dict[str, Any]: """ Get information about a strategy @@ -238,10 +295,28 @@ def get_strategy_loader(strategies_path: str | None = None) -> ExternalStrategyL Returns: ExternalStrategyLoader instance + + Behavior: + - If strategies_path is provided, use it. + - Otherwise prefer the project 'external_strategies' directory (for Docker mounts). + - If that doesn't exist, fall back to the bundled 'quant-strategies' directory. + - If neither exists, initialize loader with None (loader will simply have no strategies). 
""" global _strategy_loader if _strategy_loader is None: - _strategy_loader = ExternalStrategyLoader(strategies_path) + resolved = strategies_path + if resolved is None: + # Resolve sensible defaults relative to project root + project_root = Path(__file__).parent.parent.parent + external_dir = project_root / "external_strategies" + quant_dir = project_root / "quant-strategies" + if external_dir.exists(): + resolved = str(external_dir) + elif quant_dir.exists(): + resolved = str(quant_dir) + else: + resolved = None + _strategy_loader = ExternalStrategyLoader(resolved) return _strategy_loader diff --git a/src/core/simple_backtest_engine.py b/src/core/simple_backtest_engine.py deleted file mode 100644 index bbafa45..0000000 --- a/src/core/simple_backtest_engine.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -Simple Backtest Engine - Direct backtesting library integration -Simple, reliable backtesting approach using the backtesting library. -""" - -from __future__ import annotations - -import logging -from typing import Any - -import pandas as pd -from backtesting import Backtest - -from .backtest_engine import ( - BacktestConfig, - BacktestResult, - create_backtesting_strategy_adapter, -) -from .data_manager import UnifiedDataManager -from .strategy import StrategyFactory - - -class SimpleBacktestEngine: - """ - Simplified backtest engine using backtesting library directly. - Eliminates wrapper complexity and provides ground truth results. 
- """ - - def __init__(self): - self.logger = logging.getLogger(__name__) - self.data_manager = UnifiedDataManager() - - def run_single_backtest( - self, symbol: str, strategy_name: str, config: BacktestConfig - ) -> BacktestResult: - """Run a single backtest using backtesting library directly.""" - try: - # Get data - data = self.data_manager.get_data( - symbol, config.start_date, config.end_date, config.interval - ) - - if data is None or data.empty: - return BacktestResult( - symbol=symbol, - strategy=strategy_name, - parameters={}, - config=config, - metrics={}, - error="No data available", - ) - - # Prepare data for backtesting library - bt_data = self._prepare_data(data) - - # Create strategy - strategy = StrategyFactory.create_strategy(strategy_name) - StrategyClass = create_backtesting_strategy_adapter(strategy) - - # Run backtest with backtesting library - bt = Backtest( - bt_data, - StrategyClass, - cash=config.initial_capital, - commission=config.commission, - finalize_trades=True, # Ensure all trades are captured - ) - - bt_results = bt.run() - - # Extract metrics directly from backtesting library - metrics = self._extract_metrics(bt_results) - - # Extract trades if requested - trades = None - if config.save_trades and hasattr(bt_results, "_trades"): - trades = ( - bt_results._trades.copy() if not bt_results._trades.empty else None - ) - - return BacktestResult( - symbol=symbol, - strategy=strategy_name, - parameters={}, - config=config, - metrics=metrics, - trades=trades, - error=None, - ) - - except Exception as e: - self.logger.error("Backtest failed for %s/%s: %s", symbol, strategy_name, e) - return BacktestResult( - symbol=symbol, - strategy=strategy_name, - parameters={}, - config=config, - metrics={}, - error=str(e), - ) - - def run_batch_backtests(self, config: BacktestConfig) -> list[BacktestResult]: - """Run multiple backtests.""" - results = [] - - for symbol in config.symbols: - for strategy in config.strategies: - result = 
self.run_single_backtest(symbol, strategy, config) - results.append(result) - - return results - - def _prepare_data(self, data: pd.DataFrame) -> pd.DataFrame: - """Prepare data for backtesting library (uppercase OHLCV columns).""" - if all( - col in data.columns for col in ["open", "high", "low", "close", "volume"] - ): - return data.rename( - columns={ - "open": "Open", - "high": "High", - "low": "Low", - "close": "Close", - "volume": "Volume", - } - )[["Open", "High", "Low", "Close", "Volume"]] - return data[["Open", "High", "Low", "Close", "Volume"]] - - def _extract_metrics(self, bt_results) -> dict[str, Any]: - """Extract metrics directly from backtesting library results.""" - return { - # Core performance metrics - "total_return": float(bt_results.get("Return [%]", 0.0)), - "sharpe_ratio": float(bt_results.get("Sharpe Ratio", 0.0)) - if not pd.isna(bt_results.get("Sharpe Ratio", 0.0)) - else 0.0, - "sortino_ratio": float(bt_results.get("Sortino Ratio", 0.0)) - if not pd.isna(bt_results.get("Sortino Ratio", 0.0)) - else 0.0, - "calmar_ratio": float(bt_results.get("Calmar Ratio", 0.0)) - if not pd.isna(bt_results.get("Calmar Ratio", 0.0)) - else 0.0, - # Risk metrics - "max_drawdown": abs(float(bt_results.get("Max. 
Drawdown [%]", 0.0))), - "volatility": float(bt_results.get("Volatility [%]", 0.0)) - if not pd.isna(bt_results.get("Volatility [%]", 0.0)) - else 0.0, - # Trade metrics - "num_trades": int(bt_results.get("# Trades", 0)), - "win_rate": float(bt_results.get("Win Rate [%]", 0.0)) - if not pd.isna(bt_results.get("Win Rate [%]", 0.0)) - else 0.0, - "profit_factor": float(bt_results.get("Profit Factor", 1.0)) - if not pd.isna(bt_results.get("Profit Factor", 1.0)) - else 1.0, - # Additional metrics - "exposure_time": float(bt_results.get("Exposure Time [%]", 0.0)), - "start_value": float(bt_results.get("Start", 0.0)), - "end_value": float(bt_results.get("End", 0.0)), - } diff --git a/src/database/unified_models.py b/src/database/unified_models.py new file mode 100644 index 0000000..b0f72be --- /dev/null +++ b/src/database/unified_models.py @@ -0,0 +1,480 @@ +""" +Lightweight SQLAlchemy models and helpers for the Unified CLI run lineage and results. + +This module provides: +- Declarative models for runs, backtest_results, trades, symbol_aggregates, run_artifacts. +- Helper functions: create_tables, create_run_from_manifest, find_run_by_plan_hash. + +It is intentionally defensive: tries to reuse src.database.db_connection.get_engine() if available, +falls back to a sqlite file-based engine when not. Designed for best-effort use by the CLI. 
+""" + +from __future__ import annotations + +import os +import uuid +from datetime import datetime +from typing import Any, Dict, Optional + +from sqlalchemy import ( + Column, + DateTime, + ForeignKey, + Integer, + String, + Text, + UniqueConstraint, + create_engine, +) +from sqlalchemy.dialects.postgresql import ( + JSONB as PG_JSONB, # type: ignore[import-not-found] +) +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import declarative_base, relationship, scoped_session, sessionmaker + +# Prefer JSONB for Postgres, fallback to generic JSON +try: + from sqlalchemy import JSON as SQLJSON # type: ignore[import-not-found] +except Exception: + SQLJSON = Text + +Base = declarative_base() + + +# Engine/session helpers +def _get_engine(): + # Try to reuse project's db_connection engine helpers if present + # Prefer sync engine so this module stays simple. + # Test/CI override: force lightweight SQLite to avoid external DB dependency + try: + force_sqlite = False + # Common signals for test/CI environments available at import time + if os.environ.get("UNIFIED_MODELS_SQLITE", "").lower() in { + "1", + "true", + "yes", + } or os.environ.get("CI", "").lower() in {"1", "true", "yes"}: + force_sqlite = True + elif os.environ.get("PYTEST_CURRENT_TEST"): + # Usually set by pytest while collecting/running tests + force_sqlite = True + elif os.environ.get("TESTING", "").lower() in {"1", "true", "yes"}: + force_sqlite = True + + if force_sqlite: + database_url = f"sqlite:///{os.path.abspath('quant_unified_test.db')}" + return create_engine(database_url, echo=False, future=True) + except Exception: + pass + try: + from src.database.db_connection import ( + get_sync_engine, # type: ignore[import-not-found] + ) + + eng = get_sync_engine() + if eng is not None: + return eng + except Exception: + pass + try: + # As a secondary option, try the DatabaseManager property if exported + from src.database.db_connection import ( + db_manager, # type: ignore[import-not-found] + 
) + + eng = getattr(db_manager, "sync_engine", None) + if eng is not None: + return eng + except Exception: + pass + + # Fallback: use DATABASE_URL env var or sqlite file + database_url = ( + os.environ.get("DATABASE_URL") + or f"sqlite:///{os.path.abspath('quant_unified.db')}" + ) + eng = create_engine(database_url, echo=False, future=True) + return eng + + +ENGINE = _get_engine() +Session = scoped_session( + sessionmaker(bind=ENGINE, autoflush=False, future=True, expire_on_commit=False) +) + + +# Helper to pick JSON type depending on DB +def JSON_TYPE(): + url = str(ENGINE.url).lower() if ENGINE and ENGINE.url else "" + if "postgres" in url or "psql" in url: + return PG_JSONB + return SQLJSON + + +class Run(Base): + __tablename__ = "runs" + # Note: default schema left to DB config; migrations can add schema `quant` if desired. + run_id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + started_at_utc = Column( + DateTime(timezone=True), nullable=False, default=datetime.utcnow + ) + finished_at_utc = Column(DateTime(timezone=True), nullable=True) + actor = Column(String(128), nullable=False) + action = Column(String(64), nullable=False) + collection_ref = Column(Text, nullable=False) + strategies_mode = Column(String(256), nullable=False) + intervals_mode = Column(String(256), nullable=False) + target_metric = Column(String(64), nullable=False) + period_mode = Column(String(64), nullable=False) + args_json = Column(JSON_TYPE(), nullable=False) + git_sha_app = Column(String(64), nullable=True) + git_sha_strat = Column(String(64), nullable=True) + data_source = Column(String(128), nullable=True) + plan_hash = Column(String(128), nullable=False, unique=True, index=True) + status = Column(String(32), nullable=False, default="running") + error_summary = Column(Text, nullable=True) + + +class BacktestResult(Base): + __tablename__ = "backtest_results" + result_id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + run_id = 
Column( + String(36), + ForeignKey("runs.run_id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + symbol = Column(String(64), nullable=False, index=True) + strategy = Column(String(256), nullable=False, index=True) + interval = Column(String(32), nullable=False, index=True) + start_at_utc = Column(DateTime(timezone=True), nullable=True) + end_at_utc = Column(DateTime(timezone=True), nullable=True) + rank_in_symbol = Column(Integer, nullable=True) + metrics = Column(JSON_TYPE(), nullable=False) + engine_ctx = Column(JSON_TYPE(), nullable=True) + trades_raw = Column(Text, nullable=True) + error = Column(Text, nullable=True) + + __table_args__ = ( + UniqueConstraint( + "run_id", + "symbol", + "strategy", + "interval", + name="uq_run_symbol_strategy_interval", + ), + ) + + run = relationship("Run", backref="results") + + +class Trade(Base): + __tablename__ = "trades" + trade_id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + result_id = Column( + String(36), + ForeignKey("backtest_results.result_id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + trade_index = Column(Integer, nullable=False) + # Optional timestamps for entry/exit (UTC) + entry_time = Column(DateTime(timezone=True), nullable=True) + exit_time = Column(DateTime(timezone=True), nullable=True) + size = Column(String(64), nullable=True) + entry_bar = Column(Integer, nullable=True) + exit_bar = Column(Integer, nullable=True) + entry_price = Column(String(64), nullable=True) + exit_price = Column(String(64), nullable=True) + pnl = Column(String(64), nullable=True) + duration = Column(String(64), nullable=True) + tag = Column(String(128), nullable=True) + entry_signals = Column(Text, nullable=True) + exit_signals = Column(Text, nullable=True) + + __table_args__ = ( + UniqueConstraint("result_id", "trade_index", name="uq_result_trade_index"), + ) + + +class SymbolAggregate(Base): + __tablename__ = "symbol_aggregates" + id = Column(String(36), primary_key=True, 
default=lambda: str(uuid.uuid4())) + run_id = Column( + String(36), + ForeignKey("runs.run_id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + symbol = Column(String(64), nullable=False) + best_by = Column(String(64), nullable=False) + best_result = Column( + String(36), + ForeignKey("backtest_results.result_id", ondelete="CASCADE"), + nullable=False, + ) + summary = Column(JSON_TYPE(), nullable=False) + + __table_args__ = ( + UniqueConstraint("run_id", "symbol", "best_by", name="uq_run_symbol_bestby"), + ) + + +class RunArtifact(Base): + __tablename__ = "run_artifacts" + artifact_id = Column( + String(36), primary_key=True, default=lambda: str(uuid.uuid4()) + ) + run_id = Column( + String(36), + ForeignKey("runs.run_id", ondelete="CASCADE"), + nullable=False, + index=True, + ) + artifact_type = Column(String(64), nullable=False) + path_or_uri = Column(Text, nullable=False) + meta = Column(JSON_TYPE(), nullable=True) + + +class BestStrategy(Base): + """Best performing strategy for each symbol/timeframe combination (lightweight).""" + + __tablename__ = "best_strategies" + __table_args__ = ( + UniqueConstraint("symbol", "timeframe", name="uq_best_symbol_timeframe"), + ) + + id = Column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + symbol = Column(String(64), nullable=False, index=True) + timeframe = Column(String(32), nullable=False, index=True) + strategy = Column(String(256), nullable=False) + + # Performance metrics + sortino_ratio = Column( + SQLJSON().type if False else SQLJSON, nullable=True + ) # keep flexible; actual usage stores numbers + calmar_ratio = Column(SQLJSON().type if False else SQLJSON, nullable=True) + sharpe_ratio = Column(SQLJSON().type if False else SQLJSON, nullable=True) + total_return = Column(SQLJSON().type if False else SQLJSON, nullable=True) + max_drawdown = Column(SQLJSON().type if False else SQLJSON, nullable=True) + + backtest_result_id = Column(String(36), nullable=True) + updated_at = 
Column(DateTime(timezone=True), nullable=True) + + +def create_tables(): + Base.metadata.create_all(ENGINE) + # Best-effort migration: ensure new optional columns exist + try: + _ensure_trade_time_columns() + except Exception: + pass + + +def drop_tables(): + """Drop all tables for a full reset (dangerous).""" + try: + Base.metadata.drop_all(ENGINE) + except Exception: + # best-effort; caller can recreate afterwards + pass + + +def _ensure_trade_time_columns() -> None: + """Add entry_time and exit_time columns to trades if missing (best-effort). + + Uses SQLAlchemy Inspector to detect existing columns. Adds TIMESTAMPTZ for Postgres + and TEXT for SQLite (stored as ISO strings). + """ + try: + from sqlalchemy import inspect, text + + insp = inspect(ENGINE) + cols = {c.get("name") for c in insp.get_columns("trades")} + to_add = [] + if "entry_time" not in cols: + to_add.append("entry_time") + if "exit_time" not in cols: + to_add.append("exit_time") + if not to_add: + return + url = str(ENGINE.url).lower() if ENGINE and ENGINE.url else "" + with ENGINE.begin() as conn: + for col in to_add: + if "postgres" in url or "psql" in url: + conn.execute( + text( + f"ALTER TABLE trades ADD COLUMN IF NOT EXISTS {col} TIMESTAMPTZ NULL" + ) + ) + else: + # SQLite and others: check again to avoid errors, then add as TEXT + if col not in {c.get("name") for c in insp.get_columns("trades")}: + conn.execute(text(f"ALTER TABLE trades ADD COLUMN {col} TEXT")) + except Exception: + # Silent; optional migration + pass + + +# Convenience helpers used by CLI +def create_run_from_manifest(manifest: Dict[str, Any]) -> Optional[Run]: + """ + Insert a Run row from manifest dict. If a run with same plan_hash exists, return it. 
+ """ + sess = Session() + plan_hash = manifest.get("plan", {}).get("plan_hash") + if not plan_hash: + raise ValueError("Manifest missing plan.plan_hash") + try: + existing = sess.query(Run).filter(Run.plan_hash == plan_hash).one_or_none() + if existing: + return existing + # Defensive truncation to avoid DB column length violations (e.g., long strategy lists) + try: + strategies_raw = manifest["plan"].get("strategies", []) + if isinstance(strategies_raw, (list, tuple)): + strategies_mode_raw = ",".join([str(s) for s in strategies_raw]) + else: + strategies_mode_raw = str(strategies_raw) + except Exception: + strategies_mode_raw = "" + + if len(strategies_mode_raw) > 256: + strategies_mode = strategies_mode_raw[:252] + "..." + else: + strategies_mode = strategies_mode_raw + + try: + intervals_raw = manifest["plan"].get("intervals", []) + if isinstance(intervals_raw, (list, tuple)): + intervals_mode_raw = ",".join([str(i) for i in intervals_raw]) + else: + intervals_mode_raw = str(intervals_raw) + except Exception: + intervals_mode_raw = "" + + if len(intervals_mode_raw) > 256: + intervals_mode = intervals_mode_raw[:252] + "..." 
+ else: + intervals_mode = intervals_mode_raw + + run = Run( + actor=manifest["plan"].get("actor", "cli"), + action=manifest["plan"].get("action", "backtest"), + collection_ref=manifest["plan"].get("collection", ""), + strategies_mode=strategies_mode, + intervals_mode=intervals_mode, + target_metric=manifest["plan"].get("metric", ""), + period_mode=manifest["plan"].get("period_mode", ""), + args_json=manifest["plan"], + git_sha_app=manifest["plan"].get("git_sha_app"), + git_sha_strat=manifest["plan"].get("git_sha_strat"), + plan_hash=plan_hash, + status="running", + ) + sess.add(run) + sess.commit() + return run + except IntegrityError: + sess.rollback() + return sess.query(Run).filter(Run.plan_hash == plan_hash).one_or_none() + finally: + sess.close() + + +def ensure_run_for_manifest(manifest: Dict[str, Any]) -> Optional[Run]: + """ + Ensure a Run exists for the given manifest. + Tries create_run_from_manifest first. If that fails, attempts a manual upsert. + Returns a Run instance or None on failure. 
+ """ + plan_hash = manifest.get("plan", {}).get("plan_hash") + if not plan_hash: + return None + + # First try the existing helper which handles most common cases + try: + run = create_run_from_manifest(manifest) + if run: + return run + except Exception: + # fall through to manual attempt + pass + + sess = Session() + try: + # Try to find existing run by plan_hash + existing = sess.query(Run).filter(Run.plan_hash == plan_hash).one_or_none() + if existing: + return existing + + # Build a minimal Run object from manifest safely + plan = manifest.get("plan", {}) or {} + # Defensive truncation for safety when constructing Run from manifest 'plan' + try: + strategies_raw = plan.get("strategies", []) + if isinstance(strategies_raw, (list, tuple)): + strategies_mode_raw = ",".join([str(s) for s in strategies_raw]) + else: + strategies_mode_raw = str(strategies_raw) + except Exception: + strategies_mode_raw = "" + + if len(strategies_mode_raw) > 256: + strategies_mode = strategies_mode_raw[:252] + "..." + else: + strategies_mode = strategies_mode_raw + + try: + intervals_raw = plan.get("intervals", []) + if isinstance(intervals_raw, (list, tuple)): + intervals_mode_raw = ",".join([str(i) for i in intervals_raw]) + else: + intervals_mode_raw = str(intervals_raw) + except Exception: + intervals_mode_raw = "" + + if len(intervals_mode_raw) > 256: + intervals_mode = intervals_mode_raw[:252] + "..." 
+ else: + intervals_mode = intervals_mode_raw + + run = Run( + actor=plan.get("actor", "cli"), + action=plan.get("action", "backtest"), + collection_ref=plan.get("collection", ""), + strategies_mode=strategies_mode, + intervals_mode=intervals_mode, + target_metric=plan.get("metric", ""), + period_mode=plan.get("period_mode", ""), + args_json=plan, + plan_hash=plan_hash, + status="running", + ) + sess.add(run) + sess.commit() + return run + except IntegrityError: + # If another process inserted concurrently, return that row + try: + sess.rollback() + return sess.query(Run).filter(Run.plan_hash == plan_hash).one_or_none() + except Exception: + sess.rollback() + return None + except Exception: + try: + sess.rollback() + except Exception: + pass + return None + finally: + sess.close() + + +def find_run_by_plan_hash(plan_hash: str) -> Optional[Run]: + sess = Session() + try: + return sess.query(Run).filter(Run.plan_hash == plan_hash).one_or_none() + finally: + sess.close() diff --git a/src/portfolio/__init__.py b/src/portfolio/__init__.py deleted file mode 100644 index 0ef1192..0000000 --- a/src/portfolio/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Portfolio management and optimization modules.""" diff --git a/src/reporting/__init__.py b/src/reporting/__init__.py index 3059ee9..df20666 100644 --- a/src/reporting/__init__.py +++ b/src/reporting/__init__.py @@ -2,6 +2,8 @@ from __future__ import annotations -from .detailed_portfolio_report import DetailedPortfolioReporter +# The DetailedPortfolioReporter implementation lives in collection_report.py. 
+# Expose it at package level for callers that import src.reporting.DetailedPortfolioReporter +from .collection_report import DetailedPortfolioReporter __all__ = ["DetailedPortfolioReporter"] diff --git a/src/reporting/ai_report_generator.py b/src/reporting/ai_report_generator.py new file mode 100644 index 0000000..2c23a71 --- /dev/null +++ b/src/reporting/ai_report_generator.py @@ -0,0 +1,157 @@ +"""AI Report Generator for Investment Recommendations.""" + +from __future__ import annotations + +from pathlib import Path + +from src.ai.models import PortfolioRecommendation + + +class AIReportGenerator: + """Generates HTML reports for AI investment recommendations.""" + + def __init__(self): + # Base dir is unified with other exports + self.base_dir = Path("exports/ai_reco") + self.base_dir.mkdir(parents=True, exist_ok=True) + + def generate_html_report( + self, + recommendation: PortfolioRecommendation, + portfolio_name: str, + year: str, + quarter: str, + interval: str, + ) -> str: + """Generate HTML report under exports/ai_reco/// with unified name.""" + + quarterly_dir = self.base_dir / str(year) / str(quarter) + quarterly_dir.mkdir(parents=True, exist_ok=True) + + sanitized = ( + portfolio_name.replace(" ", "_").replace("/", "_").strip("_") + or "All_Collections" + ) + safe_interval = (interval or "multi").replace("/", "-") + filename = f"{sanitized}_Collection_{year}_{quarter}_{safe_interval}.html" + output_path = quarterly_dir / filename + + html_content = self._create_html_content(recommendation, portfolio_name) + + output_path.write_text(html_content, encoding="utf-8") + return str(output_path) + + def _create_html_content( + self, recommendation: PortfolioRecommendation, portfolio_name: str + ) -> str: + """Create HTML content for AI recommendations.""" + + asset_rows = "" + for asset in recommendation.asset_recommendations: + badge_class = ( + "bg-emerald-500/10 text-emerald-300" + if asset.recommendation_type == "BUY" + else "bg-amber-500/10 
text-amber-300" + if asset.recommendation_type == "HOLD" + else "bg-rose-500/10 text-rose-300" + ) + + asset_rows += f""" + + {asset.symbol} + {asset.strategy} + {asset.timeframe} + {asset.allocation_percentage:.1f}% + {asset.risk_level} + {asset.confidence_score:.3f} + {asset.sortino_ratio:.3f} + {asset.sharpe_ratio:.3f} + {asset.total_return:.2f}% + {asset.risk_per_trade:.1f}% + {asset.position_size:.1f}% + {asset.stop_loss:.0f} + {asset.take_profit:.0f} + {asset.recommendation_type} + """ + + html_template = f""" + + + + + AI Investment Recommendations: {portfolio_name} + + + + + +
+
+

AI Investment Recommendations

+

Portfolio: {portfolio_name} • Risk Profile: {recommendation.risk_profile.title()}

+ +
+ +
+

Summary

+
+
+
Total Assets
+
{recommendation.total_assets}
+
+
+
Expected Return
+
{recommendation.expected_return:.2f}%
+
+
+
Confidence
+
{recommendation.confidence_score:.3f}
+
+
+

{recommendation.reasoning}

+
+ +
+

Asset Recommendations

+
+ + + + + + + + + + + + + + + + + + + + + {asset_rows} + +
SymbolStrategyTimeframeAllocationRisk LevelConfidenceSortinoSharpeReturnRisk/TradePos. SizeSLTPAction
+
+
+
+ +""" + + # Set CSV link dynamically (same directory, same base name with .csv) + try: + # Simple replacement: add a small inline script to patch href at runtime + html_template += "\n\n" + except Exception: + pass + + return html_template diff --git a/src/reporting/collection_report.py b/src/reporting/collection_report.py new file mode 100644 index 0000000..aaccf7d --- /dev/null +++ b/src/reporting/collection_report.py @@ -0,0 +1,1136 @@ +"""Clean Portfolio Report Generator (DB-sourced, Tailwind-ready) + +This reporter reads only from the database (unified_models lightweight schema) to +render a per-asset HTML report. It prefers detailed stats saved in +unified_models.BacktestResult.engine_ctx and overlays values from +unified_models.BacktestResult.metrics when needed. No JSON files are used. + +Styling: Uses Tailwind. In production, set TAILWIND_CSS_HREF to a built CSS +file (e.g., /assets/tailwind.min.css). If unset and no local CSS is found under +exports/reports/assets/tailwind.min.css, falls back to the CDN for dev only. 
+""" + +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any, Dict + +from src.reporting.report_organizer import ReportOrganizer + + +class DetailedPortfolioReporter: + """Generates detailed visual reports using only DB data (unified_models).""" + + def __init__(self): + self.report_organizer = ReportOrganizer() + + def generate_comprehensive_report( + self, + portfolio_config: Dict[str, Any], + start_date: str, + end_date: str, + strategies: list[str], + timeframes: list[str] | None = None, + ) -> str: + if timeframes is None: + timeframes = ["1d"] + + symbols = portfolio_config.get("symbols") or [] + assets_data: Dict[str, Dict[str, Any]] = {} + for symbol in symbols: + assets_data[symbol] = self._get_asset_data( + symbol, preferred_timeframes=timeframes or ["1d"] + ) + + html = self._create_html_report( + portfolio_config, + assets_data, + start_date, + end_date, + strategies=strategies, + timeframes=timeframes, + ) + # Choose interval for filename: prefer '1d' if included + interval = "1d" + try: + if timeframes: + interval = "1d" if "1d" in timeframes else timeframes[0] + except Exception: + interval = "1d" + return self._save_report( + html, portfolio_config.get("name") or "portfolio", interval + ) + + def _get_asset_data( + self, symbol: str, preferred_timeframes: list[str] | None = None + ) -> Dict[str, Any]: + try: + from src.database import unified_models as um + except Exception: + um = None + # Primary DB models (fallback for metrics when unified tables are empty) + try: + from src.database import models as dbm + from src.database.db_connection import ( + get_db_session as get_primary_session, # type: ignore[import-not-found] + ) + except Exception: + dbm = None + get_primary_session = None # type: ignore[assignment] + + sess = um.Session() if um else None + try: + # Prefer best strategy for requested timeframes (e.g., ['1d']) + u_bs = None + if um and sess: + try: + q = sess.query(um.BestStrategy).filter( 
+ um.BestStrategy.symbol == symbol + ) + if preferred_timeframes: + q_pref = ( + q.filter( + um.BestStrategy.timeframe.in_(preferred_timeframes) + ) + .order_by(um.BestStrategy.updated_at.desc()) + .limit(1) + ) + u_bs = q_pref.one_or_none() + # Fallback to any timeframe if none found for preference + if not u_bs: + u_bs = ( + q.order_by(um.BestStrategy.updated_at.desc()) + .limit(1) + .one_or_none() + ) + except Exception: + u_bs = None + + # Secondary fallback to primary models BestStrategy (backtests schema) + b_bs = None + if not u_bs and dbm is not None and get_primary_session is not None: + try: + s2 = get_primary_session() + except Exception: + s2 = None + if s2 is not None: + try: + q2 = s2.query(dbm.BestStrategy).filter( + dbm.BestStrategy.symbol == symbol + ) + if preferred_timeframes: + q2 = q2.filter( + dbm.BestStrategy.timeframe.in_(preferred_timeframes) + ) + b_bs = ( + q2.order_by(dbm.BestStrategy.updated_at.desc()) + .limit(1) + .one_or_none() + ) + except Exception: + b_bs = None + finally: + try: + s2.close() + except Exception: + pass + if not u_bs and not b_bs: + return { + "best_strategy": "N/A", + "best_timeframe": "1d", + "data": {"overview": self._empty_overview(), "orders": []}, + } + + timeframe = getattr(u_bs, "timeframe", None) or getattr( + b_bs, "timeframe", "1d" + ) + overview = self._empty_overview() + + def _f(v): + try: + return float(v) if v is not None else 0.0 + except Exception: + return 0.0 + + # Pull from unified BestStrategy or fallback BestStrategy + src_bs = u_bs if u_bs is not None else b_bs + overview["PSR"] = _f(getattr(src_bs, "sortino_ratio", 0)) + overview["sharpe_ratio"] = _f(getattr(src_bs, "sharpe_ratio", 0)) + overview["net_profit"] = _f(getattr(src_bs, "total_return", 0)) + overview["max_drawdown"] = abs(_f(getattr(src_bs, "max_drawdown", 0))) + # optional calmar + try: + overview["calmar_ratio"] = _f(getattr(u_bs, "calmar_ratio", 0)) + except Exception: + pass + + stats_full: Dict[str, Any] = {} + 
period_start_str: str | None = None + period_end_str: str | None = None + + # Find corresponding BacktestResult for richer stats + br = None + try: + if getattr(u_bs, "backtest_result_id", None): + br = ( + sess.query(um.BacktestResult) + .filter(um.BacktestResult.result_id == u_bs.backtest_result_id) + .one_or_none() + ) + # If BestStrategy doesn't carry result_id, align by the declared best strategy + if not br: + br = ( + sess.query(um.BacktestResult) + .filter(um.BacktestResult.symbol == symbol) + .filter(um.BacktestResult.interval == timeframe) + .filter( + um.BacktestResult.strategy == getattr(u_bs, "strategy", "") + ) + .order_by(um.BacktestResult.end_at_utc.desc().nullslast()) + .first() + ) + # Last fallback: latest any strategy (kept for resilience) + if not br: + br = ( + sess.query(um.BacktestResult) + .filter(um.BacktestResult.symbol == symbol) + .filter(um.BacktestResult.interval == timeframe) + .order_by(um.BacktestResult.end_at_utc.desc().nullslast()) + .first() + ) + except Exception: + br = None + + # Prefer engine_ctx for canonical backtesting library stats + if br and isinstance(br.engine_ctx, dict): + stats_full.update(br.engine_ctx) + # Try to derive period from engine context when DB timestamps are missing + try: + if not period_start_str and isinstance( + stats_full.get("Start"), str + ): + period_start_str = stats_full.get("Start")[:10] + if not period_end_str and isinstance(stats_full.get("End"), str): + period_end_str = stats_full.get("End")[:10] + except Exception: + pass + + # Overlay metrics if engine_ctx lacks fields + if br and isinstance(br.metrics, dict): + m = br.metrics or {} + stats_full.setdefault( + "Sortino Ratio", m.get("sortino_ratio") or m.get("Sortino_Ratio") + ) + stats_full.setdefault( + "Sharpe Ratio", m.get("sharpe_ratio") or m.get("Sharpe_Ratio") + ) + stats_full.setdefault( + "Return [%]", m.get("total_return") or m.get("Total_Return") + ) + stats_full.setdefault( + "Max. 
Drawdown [%]", m.get("max_drawdown") or m.get("Max_Drawdown") + ) + stats_full.setdefault( + "Win Rate [%]", m.get("win_rate") or m.get("Win_Rate") + ) + + # If unified BacktestResult missing, try fallback primary results to populate overview keys + if not br and b_bs is not None: + try: + overview["PSR"] = ( + _f(getattr(b_bs, "sortino_ratio", 0)) or overview["PSR"] + ) + overview["sharpe_ratio"] = ( + _f(getattr(b_bs, "sharpe_ratio", 0)) or overview["sharpe_ratio"] + ) + overview["net_profit"] = ( + _f(getattr(b_bs, "total_return", 0)) or overview["net_profit"] + ) + md = _f(getattr(b_bs, "max_drawdown", 0)) + if md: + overview["max_drawdown"] = abs(md) + except Exception: + pass + + # Capture period from DB result for display and derived annualized stats + try: + if br: + sd = getattr(br, "start_at_utc", None) + ed = getattr(br, "end_at_utc", None) + if sd and not period_start_str: + try: + period_start_str = sd.date().isoformat() + except Exception: + pass + if ed and not period_end_str: + try: + period_end_str = ed.date().isoformat() + except Exception: + pass + except Exception: + pass + + # Compute Return (Ann.) [%] if possible + try: + if ( + br + and ("Return (Ann.) [%]" not in stats_full) + and stats_full.get("Return [%]") is not None + ): + sd = getattr(br, "start_at_utc", None) + ed = getattr(br, "end_at_utc", None) + if sd and ed: + days = max((ed - sd).days, 1) + total = 1.0 + float(stats_full["Return [%]"]) / 100.0 + ann = (total ** (365.0 / float(days))) - 1.0 + stats_full["Return (Ann.) 
[%]"] = ann * 100.0 + except Exception: + pass + + # Compute Equity Final if missing from initial_capital + try: + if ( + br + and ("Equity Final [$]" not in stats_full) + and stats_full.get("Return [%]") is not None + ): + init_cap = None + if getattr(br, "run_id", None): + run = ( + sess.query(um.Run) + .filter(um.Run.run_id == br.run_id) + .one_or_none() + ) + if run and isinstance(run.args_json, dict): + init_cap = run.args_json.get("initial_capital") + if init_cap is None: + init_cap = 10000.0 + stats_full["Equity Final [$]"] = float(init_cap) * ( + 1.0 + float(stats_full["Return [%]"]) / 100.0 + ) + except Exception: + pass + + # Push enriched values into overview tiles + def _pull(name_engine: str, key_overview: str): + try: + v = stats_full.get(name_engine) + if v is None: + return + overview[key_overview] = float(v) + except Exception: + pass + + _pull("Sortino Ratio", "PSR") + _pull("Sharpe Ratio", "sharpe_ratio") + _pull("Return [%]", "net_profit") + try: + v = stats_full.get("Max. Drawdown [%]") + if v is not None: + overview["max_drawdown"] = abs(float(v)) + except Exception: + pass + + # Ensure the summary metrics table has sensible defaults even if engine_ctx is missing + # Populate from BestStrategy/overview when BacktestResult engine_ctx is unavailable. + try: + if stats_full is None: + stats_full = {} + # Backfill core fields if absent + if stats_full.get("Sortino Ratio") is None: + stats_full["Sortino Ratio"] = overview.get("PSR") + if stats_full.get("Sharpe Ratio") is None: + stats_full["Sharpe Ratio"] = overview.get("sharpe_ratio") + if stats_full.get("Return [%]") is None: + stats_full["Return [%]"] = overview.get("net_profit") + if stats_full.get("Max. Drawdown [%]") is None: + md = overview.get("max_drawdown") + if md is not None: + # Backtesting.py reports DD as negative percent; keep sign convention for the table + stats_full["Max. 
Drawdown [%]"] = -abs(float(md)) + except Exception: + pass + + # Trades: prefer normalized Trade table, else parse trades_raw + trades: list[dict] = [] + try: + if br and getattr(br, "result_id", None): + rows = ( + sess.query(um.Trade) + .filter(um.Trade.result_id == br.result_id) + .order_by(um.Trade.trade_index.asc()) + .all() + ) + for t in rows: + trades.append( + { + "idx": getattr(t, "trade_index", None), + "entry_time": getattr(t, "entry_time", None), + "exit_time": getattr(t, "exit_time", None), + "entry_bar": getattr(t, "entry_bar", None), + "exit_bar": getattr(t, "exit_bar", None), + "entry_price": getattr(t, "entry_price", None), + "exit_price": getattr(t, "exit_price", None), + "size": getattr(t, "size", None), + "pnl": getattr(t, "pnl", None), + "duration": getattr(t, "duration", None), + "tag": getattr(t, "tag", None), + } + ) + elif br and getattr(br, "trades_raw", None): + try: + raw = json.loads(br.trades_raw) + if isinstance(raw, list): + for i, tr in enumerate(raw): + if not isinstance(tr, dict): + continue + trades.append( + { + "idx": tr.get("index") or i, + "entry_time": tr.get("EntryTime") + or tr.get("entry_time"), + "exit_time": tr.get("ExitTime") + or tr.get("exit_time"), + "entry_bar": tr.get("entry_bar") + or tr.get("EntryBar") + or tr.get("entry"), + "exit_bar": tr.get("exit_bar") + or tr.get("ExitBar") + or tr.get("exit"), + "entry_price": tr.get("entry_price"), + "exit_price": tr.get("exit_price"), + "size": tr.get("size"), + "pnl": tr.get("pnl"), + "duration": tr.get("duration"), + "tag": tr.get("tag"), + } + ) + except Exception: + pass + except Exception: + trades = [] + + # As a last resort, attempt to derive total_orders from primary DB Trade rows if unified has none + if not trades and dbm is not None and get_primary_session is not None: + try: + s3 = get_primary_session() + except Exception: + s3 = None + if s3 is not None: + try: + cnt = ( + s3.query(dbm.Trade) + .filter(dbm.Trade.symbol == symbol) + .count() + ) + if cnt 
and cnt > 0: + overview["total_orders"] = int(cnt) + except Exception: + pass + finally: + try: + s3.close() + except Exception: + pass + + # Set total_orders from persisted trades; do not compute trades locally + try: + overview["total_orders"] = len(trades) + except Exception: + overview["total_orders"] = 0 + + return { + "best_strategy": ( + getattr(u_bs, "strategy", None) + or getattr(b_bs, "strategy", "") + or "N/A" + ), + "best_timeframe": timeframe, + "stats_full": stats_full, + "data": {"overview": overview, "orders": trades}, + "period_start": period_start_str, + "period_end": period_end_str, + } + finally: + if sess is not None: + try: + sess.close() + except Exception: + pass + + def _empty_overview(self) -> Dict[str, Any]: + return { + "PSR": 0.0, + "sharpe_ratio": 0.0, + "total_orders": 0, + "net_profit": 0.0, + "max_drawdown": 0.0, + "calmar_ratio": 0.0, + } + + def _create_html_report( + self, + portfolio_config: dict, + assets_data: dict, + start_date: str, + end_date: str, + strategies: list[str] | None = None, + timeframes: list[str] | None = None, + ) -> str: + # Tailwind include: prefer local stylesheet or env var; fallback to CDN in dev + try: + import os + + tw_href = os.environ.get("TAILWIND_CSS_HREF", "").strip() + # If env var points to a local path that doesn't exist, ignore it to allow CDN fallback + if tw_href and not tw_href.startswith(("http://", "https://")): + try: + if not Path(tw_href).exists(): + tw_href = "" + except Exception: + tw_href = "" + if not tw_href: + cand = Path("exports/reports/assets/tailwind.min.css") + if cand.exists(): + tw_href = str(cand) + tailwind_tag = ( + f'' + if tw_href + else '' + ) + except Exception: + tailwind_tag = '' + # Plotly include (for inline equity charts) + plotly_tag = '' + + # Top overview (computed from assets_data) + total_assets = len(assets_data) + avg_sortino = 0.0 + winners = 0 + traders = 0 + vals = [] + for data in assets_data.values(): + ov = (data.get("data") or 
{}).get("overview") or {} + try: + vals.append(float(ov.get("PSR", 0) or 0)) + except Exception: + pass + try: + if float(ov.get("net_profit", 0) or 0) > 0: + winners += 1 + except Exception: + pass + try: + if int(ov.get("total_orders", 0) or 0) > 0: + traders += 1 + except Exception: + pass + if vals: + avg_sortino = sum(vals) / len(vals) + + # Backtest settings card (strategies, intervals, period) + strat_list = ", ".join(strategies or []) + tf_list = ", ".join(timeframes or []) + # Prefer derived period from assets_data (global earliest start, latest end) + try: + derived_starts = [] + derived_ends = [] + for v in assets_data.values(): + ps = v.get("period_start") + pe = v.get("period_end") + if isinstance(ps, str) and len(ps) >= 10: + derived_starts.append(ps[:10]) + if isinstance(pe, str) and len(pe) >= 10: + derived_ends.append(pe[:10]) + derived_start = min(derived_starts) if derived_starts else None + derived_end = max(derived_ends) if derived_ends else None + except Exception: + derived_start = None + derived_end = None + + period_str = ( + f"{derived_start} → {derived_end}" + if (derived_start and derived_end) + else (f"{start_date} → {end_date}" if (start_date and end_date) else "max") + ) + settings_card = f""" +
+ Backtest Settings +
+
+
Intervals
{tf_list or "-"}
+
Strategies
{strat_list or "-"}
+
Period
{period_str}
+
+
+
+ """ + + top_overview = f""" +
+
+
Assets
+
{total_assets}
+
+
+
Avg Sortino
+
{avg_sortino:.3f}
+
+
+
Positive Returns
+
{winners}
+
+
+
With Trades
+
{traders}
+
+
+ """ + + # Sidebar TOC (TailAdmin-style): sticky on large screens, compact chips on mobile + toc_items = [ + f'
  • {sym}
  • ' + for sym in assets_data.keys() + ] + sidebar_html = ( + '" + ) + # Mobile chips + chips_html = ( + '
    ' + '
    ' + + "".join( + [ + f'{sym}' + for sym in assets_data.keys() + ] + ) + + "
    " + "
    " + ) + + # Asset sections + asset_sections = [] + for symbol, data in assets_data.items(): + overview = (data.get("data") or {}).get("overview") or {} + stats = data.get("stats_full") or {} + + def fmt(v: Any, prec=2, pct=False, money=False) -> str: + try: + if v is None: + return "-" + f = float(v) + if money: + return f"${f:,.{prec}f}" + if pct: + return f"{f:.{prec}f}%" + return f"{f:.{prec}f}" + except Exception: + return str(v) if v is not None else "-" + + metrics_row = f""" +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Equity FinalCommissionsReturnBuy & Hold ReturnSortinoSharpeReturn (Ann.)Max DDWin Rate
    {fmt(stats.get("Equity Final [$]"), 2, money=True)}{fmt(stats.get("Commissions [$]"), 2, money=True)}{fmt(stats.get("Return [%]"), 2, pct=True)}{fmt(stats.get("Buy & Hold Return [%]"), 2, pct=True)}{fmt(stats.get("Sortino Ratio"), 3)}{fmt(stats.get("Sharpe Ratio"), 3)}{fmt(stats.get("Return (Ann.) [%]"), 2, pct=True)}{fmt(stats.get("Max. Drawdown [%]"), 2, pct=True)}{fmt(stats.get("Win Rate [%]"), 2, pct=True)}
    +
    + """ + + # Build simple sparkline from any available equity series + equity_series = [] + try: + for k in ( + "equity_curve", + "equity", + "equity_values", + "Equity Curve", + "equity_series", + ): + v = stats.get(k) + if isinstance(v, (list, tuple)) and len(v) >= 2: + equity_series = [float(x) for x in v if x is not None] + break + # Backtesting.py direct stats often store '_equity_curve' as list of dicts + if not equity_series: + v2 = stats.get("_equity_curve") + if isinstance(v2, list) and len(v2) >= 2: + try: + pts = [] + for row in v2: + if isinstance(row, dict) and "Equity" in row: + pts.append(float(row.get("Equity"))) + if len(pts) >= 2: + equity_series = pts + except Exception: + pass + if not equity_series and isinstance(stats.get("series"), list): + s0 = stats["series"][0] + if isinstance(s0, dict) and isinstance(s0.get("y"), list): + equity_series = [float(x) for x in s0["y"] if x is not None] + except Exception: + equity_series = [] + + def _spark(points: list[float], width=600, height=80) -> str: + try: + if not points or len(points) < 2: + return "" + mn = min(points) + mx = max(points) + rng = (mx - mn) or 1.0 + step = width / (len(points) - 1) + cmds = [] + for i, v in enumerate(points): + x = i * step + y = height - ((float(v) - mn) / rng) * height + cmds.append(("M" if i == 0 else "L") + f" {x:.2f} {y:.2f}") + d = " ".join(cmds) + return ( + f'' + f'' + "" + ) + except Exception: + return "" + + spark = _spark(equity_series) + # Fallback: embed plot HTML if provided by engine_ctx + plot_embed = None + try: + for k in ("plot_html", "plot_div", "plot", "chart_html"): + v = stats.get(k) + if isinstance(v, str) and (" str: + try: + import re as _re + + return _re.sub(r"[^A-Za-z0-9_\-]", "_", s) + except Exception: + return s + + def _plotly_equity( + sym: str, eq: list[float], stats_obj: dict, orders: list[dict] + ) -> str: + try: + import json as _json + + if not eq or len(eq) < 2: + return "" + x = list(range(len(eq))) + dd = [] + try: + if 
isinstance(stats_obj.get("_equity_curve"), list): + vals = [] + has_dd = False + for r in stats_obj["_equity_curve"]: + if ( + isinstance(r, dict) + and r.get("DrawdownPct") is not None + ): + has_dd = True + vals.append(float(r.get("DrawdownPct"))) + if has_dd and len(vals) == len(eq): + dd = vals + except Exception: + dd = [] + div_id = f"plot_{_safe_id(sym)}" + data = [ + { + "x": x, + "y": eq, + "type": "scatter", + "mode": "lines", + "name": "Equity", + "line": {"color": "#22d3ee"}, + } + ] + layout = { + "margin": {"l": 30, "r": 10, "t": 10, "b": 30}, + "paper_bgcolor": "rgba(0,0,0,0)", + "plot_bgcolor": "rgba(0,0,0,0)", + "xaxis": {"showgrid": False, "zeroline": False}, + "yaxis": {"showgrid": False, "zeroline": False}, + "showlegend": True, + } + if dd: + data.append( + { + "x": x, + "y": dd, + "type": "scatter", + "mode": "lines", + "name": "Drawdown [%]", + "line": {"color": "#f43f5e"}, + "yaxis": "y2", + } + ) + layout["yaxis2"] = {"overlaying": "y", "side": "right"} + + # Buy & Hold overlay if metric present + try: + bnh = stats_obj.get("Buy & Hold Return [%]") + if bnh is not None and len(eq) >= 2: + eq0 = float(eq[0]) + eq_bnh_end = eq0 * (1.0 + float(bnh) / 100.0) + # Linear interpolation for lack of series + y_bnh = [ + eq0 + (eq_bnh_end - eq0) * (i / (len(x) - 1)) + for i in range(len(x)) + ] + data.append( + { + "x": x, + "y": y_bnh, + "type": "scatter", + "mode": "lines", + "name": "Buy & Hold", + "line": {"color": "#a3e635", "dash": "dash"}, + } + ) + except Exception: + pass + + # Entry/Exit markers from orders using entry_bar/exit_bar indices + try: + entries_x = [] + entries_y = [] + exits_x = [] + exits_y = [] + for od in orders or []: + eb = od.get("entry_bar") + xb = od.get("exit_bar") + if isinstance(eb, (int, float)) and 0 <= int(eb) < len(eq): + idx = int(eb) + entries_x.append(x[idx]) + entries_y.append(eq[idx]) + if isinstance(xb, (int, float)) and 0 <= int(xb) < len(eq): + idx = int(xb) + exits_x.append(x[idx]) + 
exits_y.append(eq[idx]) + if entries_x: + data.append( + { + "x": entries_x, + "y": entries_y, + "type": "scatter", + "mode": "markers", + "name": "Entry", + "marker": { + "color": "#22c55e", + "size": 6, + "symbol": "triangle-up", + }, + } + ) + if exits_x: + data.append( + { + "x": exits_x, + "y": exits_y, + "type": "scatter", + "mode": "markers", + "name": "Exit", + "marker": { + "color": "#ef4444", + "size": 6, + "symbol": "triangle-down", + }, + } + ) + except Exception: + pass + payload = _json.dumps( + { + "data": data, + "layout": layout, + "config": {"displayModeBar": False, "responsive": True}, + } + ) + return ( + f'
    ' + f"" + ) + except Exception: + return "" + + plotly_plot = _plotly_equity( + symbol, + equity_series, + stats, + (data.get("data") or {}).get("orders") or [], + ) + placeholder_plot = plot_embed or plotly_plot or spark + if not placeholder_plot: + placeholder_plot = '
    Plotting disabled in this environment.
    ' + plot_section = f""" +

    Equity Curve

    +
    {placeholder_plot}
    + """ + + # Trades table if any + trades = (data.get("data") or {}).get("orders") or [] + trades_html = "" + if isinstance(trades, list) and trades: + trade_rows = [] + for tr in trades[:200]: + + def _fmt_dt(v): + try: + import datetime as _dt + + if v is None: + return "" + if isinstance(v, str): + return v + if isinstance(v, (_dt.datetime, _dt.date)): + return v.isoformat() + except Exception: + return str(v) if v is not None else "" + return str(v) + + trade_rows.append( + f"" + f'{tr.get("idx", "")}' + f'{_fmt_dt(tr.get("entry_time"))}' + f'{_fmt_dt(tr.get("exit_time"))}' + f'{tr.get("size", "")}' + f'{tr.get("entry_price", "")}' + f'{tr.get("exit_price", "")}' + f'{tr.get("pnl", "")}' + f'{tr.get("duration", "")}' + f'{tr.get("tag", "")}' + f"" + ) + trades_html = f""" +

    Trades

    +
    + + + + + + + + + + + + + + + {"".join(trade_rows)} +
    #Entry TimeExit TimeSizeEntryExitPnLDurationTag
    +
    + """ + + asset_sections.append( + f""" +
    +
    +

    {symbol}

    +
    + Best: {(data.get("best_strategy") or "N/A")} + ⏰ {data.get("best_timeframe", "1d")} +
    +
    +

    Summary Metrics

    + {metrics_row} +
    +
    +
    Sortino
    +
    {overview.get("PSR", 0):.3f}
    +
    +
    +
    Sharpe
    +
    {overview.get("sharpe_ratio", 0):.3f}
    +
    +
    +
    Orders
    +
    {int(overview.get("total_orders", 0) or 0)}
    +
    +
    +
    Net Profit
    +
    0 else "text-rose-400")}\">{overview.get("net_profit", 0):.2f}%
    +
    +
    +
    Max Drawdown
    +
    -{overview.get("max_drawdown", 0):.2f}%
    +
    +
    +
    Calmar
    +
    {overview.get("calmar_ratio", 0):.3f}
    +
    +
    + {plot_section} + {trades_html} +
    + """ + ) + + # Use double braces for literal braces in .format() + # Footer: educational disclaimer + project link + footer_html = ( + '
    ' + '
    ' + "This report is for educational purposes only and does not constitute financial advice. " + 'Project: quant-system.' + "
    " + "
    " + ) + html_template = """ + + + + Collection Analysis: {{portfolio_name}} + {tailwind_tag} + {plotly_tag} + + + +
    +
    +

    {{portfolio_name}}

    +

    Real Backtesting Data • {{start_date}} → {{end_date}}

    +
    +
    + {sidebar_html} +
    + {settings_card} + {top_overview} + {chips_html} + {{asset_sections}} +
    +
    +
    + {footer_html} + + +""" + + # Brace-safe rendering: protect placeholders, escape all braces, then restore placeholders + tokens = { + "[[PORTFOLIO_NAME]]": "{portfolio_name}", + "[[START_DATE]]": "{start_date}", + "[[END_DATE]]": "{end_date}", + "[[ASSET_SECTIONS]]": "{asset_sections}", + "[[TAILWIND_TAG]]": "{tailwind_tag}", + "[[PLOTLY_TAG]]": "{plotly_tag}", + "[[SIDEBAR_HTML]]": "{sidebar_html}", + "[[TOP_OVERVIEW]]": "{top_overview}", + "[[CHIPS_HTML]]": "{chips_html}", + "[[SETTINGS_CARD]]": "{settings_card}", + "[[FOOTER_HTML]]": "{footer_html}", + } + # Mark placeholders + html_template_marked = ( + html_template.replace("{{portfolio_name}}", "[[PORTFOLIO_NAME]]") + .replace("{{start_date}}", "[[START_DATE]]") + .replace("{{end_date}}", "[[END_DATE]]") + .replace("{{asset_sections}}", "[[ASSET_SECTIONS]]") + .replace("{tailwind_tag}", "[[TAILWIND_TAG]]") + .replace("{plotly_tag}", "[[PLOTLY_TAG]]") + .replace("{sidebar_html}", "[[SIDEBAR_HTML]]") + .replace("{top_overview}", "[[TOP_OVERVIEW]]") + .replace("{chips_html}", "[[CHIPS_HTML]]") + .replace("{settings_card}", "[[SETTINGS_CARD]]") + .replace("{footer_html}", "[[FOOTER_HTML]]") + ) + # Escape all remaining braces so they render literally + html_template_escaped = html_template_marked.replace("{", "{{").replace( + "}", "}}" + ) + # Restore placeholders + for t, ph in tokens.items(): + html_template_escaped = html_template_escaped.replace(t, ph) + + # Choose header dates: prefer derived period + header_start = derived_start or start_date + header_end = derived_end or end_date + + return html_template_escaped.format( + portfolio_name=portfolio_config.get("name") or "Portfolio", + start_date=header_start, + end_date=header_end, + asset_sections="\n".join(asset_sections), + tailwind_tag=tailwind_tag, + plotly_tag=plotly_tag, + sidebar_html=sidebar_html, + top_overview=top_overview, + chips_html=chips_html, + settings_card=settings_card, + footer_html=footer_html, + ) + + def _save_report( + self, 
html_content: str, portfolio_name: str, interval: str + ) -> str: + # Save via organizer using unified naming (exports/reports//Q/_Collection__Q_.html) + tmp = Path("temp_report.html") + tmp.write_text(html_content, encoding="utf-8") + try: + return str( + self.report_organizer.organize_report( + str(tmp), portfolio_name, None, interval=interval + ) + ) + finally: + if tmp.exists(): + tmp.unlink() diff --git a/src/reporting/detailed_portfolio_report.py b/src/reporting/detailed_portfolio_report.py deleted file mode 100644 index 2d989bb..0000000 --- a/src/reporting/detailed_portfolio_report.py +++ /dev/null @@ -1,400 +0,0 @@ -"""Clean Portfolio Report Generator. - -Uses only real data from database and backtesting library. -""" - -from __future__ import annotations - -import sys -from pathlib import Path - -sys.path.append(str(Path(__file__).parent.parent)) - -from sqlalchemy.orm import sessionmaker - -from src.database.db_connection import get_sync_engine -from src.database.models import BestStrategy, Trade -from utils.report_organizer import ReportOrganizer - - -class DetailedPortfolioReporter: - """Generates detailed visual reports using only real database data.""" - - def __init__(self): - self.report_organizer = ReportOrganizer() - - def generate_comprehensive_report( - self, - portfolio_config: dict, - start_date: str, - end_date: str, - strategies: list[str], - timeframes: list[str] | None = None, - ) -> str: - """Generate a comprehensive HTML report using real database data.""" - - if timeframes is None: - timeframes = ["1d"] - - # Get real data for each asset from database - assets_data = {} - for symbol in portfolio_config["symbols"]: - asset_data = self._get_real_asset_data(symbol) - if asset_data: - assets_data[symbol] = asset_data - - # Generate HTML report with real data - html_content = self._create_html_report( - portfolio_config, assets_data, start_date, end_date - ) - - # Save report - return self._save_compressed_report(html_content, 
portfolio_config["name"]) - - def _get_real_asset_data(self, symbol: str) -> dict | None: - """Get real asset data from database.""" - engine = get_sync_engine() - Session = sessionmaker(bind=engine) - session = Session() - - try: - # Get best strategy for this symbol - best_strategy = ( - session.query(BestStrategy) - .filter_by(symbol=symbol) - .order_by(BestStrategy.sortino_ratio.desc()) - .first() - ) - - if not best_strategy: - return None - - # Get trades for this strategy - trades = ( - session.query(Trade) - .filter_by( - symbol=symbol, - strategy=best_strategy.strategy, - timeframe=best_strategy.timeframe, - ) - .order_by(Trade.trade_datetime) - .all() - ) - - # Convert trades to entry/exit pairs only - orders = [] - - # Group trades by consecutive BUY→SELL pairs - i = 0 - while i < len(trades): - trade = trades[i] - - if ( - trade.side == "BUY" - and i + 1 < len(trades) - and trades[i + 1].side == "SELL" - ): - # Found a proper BUY→SELL pair - buy_trade = trade - sell_trade = trades[i + 1] - - # Calculate P&L for the pair - pnl = (float(sell_trade.price) - float(buy_trade.price)) * float( - sell_trade.size - ) - - # Add entry order - orders.append( - { - "date": buy_trade.trade_datetime.strftime("%Y-%m-%d"), - "type": "ENTRY", - "price": float(buy_trade.price), - "size": float(buy_trade.size), - "equity": float(buy_trade.equity_after or 0), - } - ) - - # Add exit order with P&L - orders.append( - { - "date": sell_trade.trade_datetime.strftime("%Y-%m-%d"), - "type": f"EXIT ({'+' if pnl >= 0 else ''}{pnl:.2f})", - "price": float(sell_trade.price), - "size": float(sell_trade.size), - "equity": float(sell_trade.equity_after or 0), - } - ) - - i += 2 # Skip both trades - else: - # Skip unpaired trades (shouldn't happen with proper implementation) - i += 1 - - return { - "best_strategy": best_strategy.strategy, - "best_timeframe": best_strategy.timeframe, - "best_score": float(best_strategy.sortino_ratio or 0), - "data": { - "overview": { - "PSR": 
float(best_strategy.sortino_ratio or 0), - "sharpe_ratio": float(best_strategy.sharpe_ratio or 0), - "total_orders": len(orders), - "net_profit": float(best_strategy.total_return or 0), - "max_drawdown": abs(float(best_strategy.max_drawdown or 0)), - "calmar_ratio": float(best_strategy.calmar_ratio or 0), - "best_timeframe": best_strategy.timeframe, - # Calculated values - "average_win": 0, - "average_loss": 0, - "win_rate": 0, - }, - "orders": orders, - "equity_curve": self._generate_simple_equity_curve(orders), - "benchmark_curve": [], - }, - } - - finally: - session.close() - - def _generate_simple_equity_curve(self, orders: list) -> list: - """Generate simple equity curve from real trades.""" - if not orders: - return [] - - equity_curve = [] - - for order in orders: - equity_curve.append({"date": order["date"], "equity": order["equity"]}) - - return equity_curve - - def _generate_backtest_plot( - self, symbol: str, strategy: str, timeframe: str, start_date: str, end_date: str - ) -> str: - """Generate interactive plot from backtesting library.""" - try: - import logging - - from bokeh.embed import file_html - from bokeh.resources import CDN - - from src.core.direct_backtest import run_direct_backtest - - logging.getLogger("bokeh").setLevel(logging.WARNING) - - # Run fresh backtest to get plot - result = run_direct_backtest( - symbol=symbol, - strategy_name=strategy, - start_date=start_date, - end_date=end_date, - timeframe=timeframe, - ) - - # Check for backtest object in result - backtest_obj = result.get("backtest_object") - if backtest_obj is not None: - try: - # Generate the interactive plot with custom styling - plot = backtest_obj.plot( - plot_width=900, - show_legend=True, - open_browser=False, - plot_trades=True, - plot_equity=True, - ) - - # Convert to HTML string with proper resources - html = file_html(plot, CDN, f"{symbol} - {strategy} ({timeframe})") - - # Extract just the plot content (remove outer HTML structure) - if '
    " in html: - start_idx = html.find('
    ") + 9 - if start_idx != -1 and end_idx != -1: - return html[start_idx:end_idx] - - return html - - except Exception as plot_error: - return f'

    Plot generation error: {plot_error!s}

    ' - else: - return '

    Backtest object not available

    ' - - except ImportError as e: - return f'

    Missing plotting dependencies: {e}

    ' - except Exception as e: - print(f"Warning: Could not generate plot for {symbol}/{strategy}: {e}") - return f'

    Plot generation failed: {e!s}

    ' - - def _create_html_report( - self, portfolio_config: dict, assets_data: dict, start_date: str, end_date: str - ) -> str: - """Create HTML report using real data.""" - - html_template = """ - - - - Portfolio Analysis: {portfolio_name} - - - -
    -
    -

    {portfolio_name}

    -

    Real Backtesting Data • {start_date} to {end_date}

    -
    - {asset_sections} -
    - -""" - - # Generate asset sections using real data - asset_sections = "" - for symbol, data in assets_data.items(): - overview = data["data"]["overview"] - orders = data["data"]["orders"] - - # Create orders table - orders_html = "" - if orders: - orders_html = """ - - - - - - - - - - - """ - - for order in orders: - order_type = order["type"] - css_class = ( - "entry" - if order_type.startswith("ENTRY") - else "exit" - if order_type.startswith("EXIT") - else order_type.lower() - ) - orders_html += f''' - - - - - - - ''' - - orders_html += """ - -
    DateTypePriceSizeEquity After
    {order["date"]}{order_type}${order["price"]:.2f}{order["size"]:.0f}${order["equity"]:,.2f}
    """ - else: - orders_html = "

    No trades recorded for this strategy.

    " - - asset_sections += f""" -
    -
    -

    {symbol}

    -
    - Best: {data["best_strategy"].title()} - ⏰ {data["best_timeframe"]} -
    -
    - -
    -
    -
    Sortino Ratio
    -
    {overview["PSR"]:.3f}
    -
    -
    -
    Sharpe Ratio
    -
    {overview["sharpe_ratio"]:.3f}
    -
    -
    -
    Total Orders
    -
    {overview["total_orders"]}
    -
    -
    -
    Net Profit
    -
    0 else "negative"}">{overview["net_profit"]:.2f}%
    -
    -
    -
    Max Drawdown
    -
    -{overview["max_drawdown"]:.2f}%
    -
    -
    -
    Calmar Ratio
    -
    {overview.get("calmar_ratio", 0):.3f}
    -
    -
    - -

    Interactive Chart

    -
    - {self._generate_backtest_plot(symbol, data["best_strategy"], data["best_timeframe"], start_date, end_date)} -
    - -

    Trading Orders

    - {orders_html} -
    """ - - return html_template.format( - portfolio_name=portfolio_config["name"], - start_date=start_date, - end_date=end_date, - asset_sections=asset_sections, - ) - - def _save_compressed_report(self, html_content: str, portfolio_name: str) -> str: - """Save compressed HTML report.""" - # Use the existing report organizer - temp_path = Path("temp_report.html") - temp_path.write_text(html_content, encoding="utf-8") - - try: - organized_path = self.report_organizer.organize_report( - str(temp_path), portfolio_name - ) - return organized_path - finally: - if temp_path.exists(): - temp_path.unlink() diff --git a/src/reporting/detailed_portfolio_report_old.py b/src/reporting/detailed_portfolio_report_old.py deleted file mode 100644 index ad9ad01..0000000 --- a/src/reporting/detailed_portfolio_report_old.py +++ /dev/null @@ -1,980 +0,0 @@ -"""Detailed Portfolio Report Generator. - -Creates comprehensive visual reports for portfolio analysis with KPIs, orders, and charts. -""" - -from __future__ import annotations - -import gzip -import json -import sys -from datetime import datetime, timedelta, timezone -from pathlib import Path - -import numpy as np - -sys.path.append(str(Path(__file__).parent.parent)) - -from utils.report_organizer import ReportOrganizer - - -class DetailedPortfolioReporter: - """Generates detailed visual reports for portfolio analysis.""" - - def __init__(self): - self.report_data = {} - self.report_organizer = ReportOrganizer() - self.rng = np.random.default_rng() - - def generate_comprehensive_report( - self, - portfolio_config: dict, - start_date: str, - end_date: str, - strategies: list[str], - timeframes: list[str] | None = None, - ) -> str: - """Generate a comprehensive HTML report for the portfolio.""" - - if timeframes is None: - timeframes = ["1d"] - - # Generate data for each asset - assets_data = {} - for symbol in portfolio_config["symbols"]: - best_combo, asset_data = self._analyze_asset_with_timeframes( - symbol, strategies, 
timeframes, start_date, end_date - ) - assets_data[symbol] = { - "best_strategy": best_combo["strategy"], - "best_timeframe": best_combo["timeframe"], - "best_score": best_combo["score"], - "data": asset_data, - } - - # Generate HTML report - html_content = self._create_html_report( - portfolio_config, assets_data, start_date, end_date - ) - - # Compress and save - return self._save_compressed_report(html_content, portfolio_config["name"]) - - def _analyze_asset_with_timeframes( - self, - symbol: str, - strategies: list[str], - timeframes: list[str], - start_date: str, - end_date: str, - ) -> tuple[dict, dict]: - """Analyze an asset using best strategy from database.""" - - from sqlalchemy.orm import sessionmaker - - from src.database.db_connection import get_sync_engine - from src.database.models import BestStrategy - - # Get best strategy from database - engine = get_sync_engine() - Session = sessionmaker(bind=engine) - session = Session() - - try: - # Get the best strategy for this symbol from database - best_strategy_db = ( - session.query(BestStrategy) - .filter_by(symbol=symbol) - .order_by(BestStrategy.sortino_ratio.desc()) - .first() - ) - - if best_strategy_db: - # Use database best strategy - best_strategy = best_strategy_db.strategy - best_timeframe = best_strategy_db.timeframe - best_metrics = { - "sharpe_ratio": float(best_strategy_db.sharpe_ratio or 0), - "sortino_ratio": float(best_strategy_db.sortino_ratio or 0), - "calmar_ratio": float(best_strategy_db.calmar_ratio or 0), - "total_return": float(best_strategy_db.total_return or 0), - "max_drawdown": float(best_strategy_db.max_drawdown or 0), - "win_rate": 0.0, # Not stored in BestStrategy table - "volatility": 0.0, # Not stored in BestStrategy table - "num_trades": 0, # Not stored in BestStrategy table - } - else: - # Fallback if not in database - best_strategy = strategies[0] - best_timeframe = timeframes[0] - best_metrics = self._simulate_strategy_timeframe_performance( - symbol, best_strategy, 
best_timeframe - ) - - best_combination = { - "strategy": best_strategy, - "timeframe": best_timeframe, - "score": best_metrics["sortino_ratio"], - "metrics": best_metrics, - } - - finally: - session.close() - - # Generate detailed data for best combination - asset_data = self._generate_detailed_metrics_with_timeframe( - symbol, - best_combination["strategy"], - best_combination["timeframe"], - start_date, - end_date, - ) - - return best_combination, asset_data - - def _analyze_asset( - self, symbol: str, strategies: list[str], start_date: str, end_date: str - ) -> tuple[str, dict]: - """Analyze an asset and return the best strategy with detailed metrics.""" - - # Simulate strategy comparison (replace with actual backtesting when fixed) - strategy_scores = {} - for strategy in strategies: - score = self._simulate_strategy_performance(symbol, strategy) - strategy_scores[strategy] = score - - # Get best strategy - best_strategy = max(strategy_scores.items(), key=lambda x: x[1]["sharpe_ratio"]) - - # Generate detailed data for best strategy - asset_data = self._generate_detailed_metrics( - symbol, best_strategy[0], start_date, end_date - ) - - return best_strategy[0], asset_data - - def _simulate_strategy_timeframe_performance( - self, symbol: str, strategy: str, timeframe: str - ) -> dict: - """Get real strategy+timeframe performance from database or direct backtest.""" - from sqlalchemy.orm import sessionmaker - - from src.database.db_connection import get_sync_engine - from src.database.models import BacktestResult - - # First try to get from database - engine = get_sync_engine() - Session = sessionmaker(bind=engine) - session = Session() - - try: - # Look for existing backtest result - result = ( - session.query(BacktestResult) - .filter_by(symbols=[symbol], strategy=strategy, timeframe=timeframe) - .first() - ) - - if result: - return { - "sharpe_ratio": float(result.sharpe_ratio or 0), - "sortino_ratio": float(result.sortino_ratio or 0), - "calmar_ratio": 
float(result.calmar_ratio or 0), - "total_return": float(result.total_return or 0), - "max_drawdown": float(result.max_drawdown or 0), - "volatility": float(result.volatility or 0), - "win_rate": float(result.win_rate or 0), - "num_trades": int(result.trades_count or 0), - } - # Fallback to direct backtest if not in database - - from src.core.direct_backtest import run_direct_backtest - - backtest_result = run_direct_backtest( - symbol=symbol, - strategy_name=strategy, - start_date="2015-01-01", - end_date="2025-08-20", - timeframe=timeframe, - ) - return backtest_result["metrics"] - - except Exception as e: - print(f"Error getting real data for {symbol}/{strategy}/{timeframe}: {e}") - # Return minimal defaults if everything fails - return { - "sharpe_ratio": 0.0, - "sortino_ratio": 0.0, - "calmar_ratio": 0.0, - "total_return": 0.0, - "max_drawdown": 0.0, - "volatility": 0.0, - "win_rate": 0.0, - "num_trades": 0, - } - finally: - session.close() - - def _simulate_strategy_performance(self, symbol: str, strategy: str) -> dict: - """Simulate strategy performance (replace with actual backtesting).""" - rng = np.random.default_rng(hash(symbol + strategy) % 2147483647) - - return { - "sharpe_ratio": rng.uniform(0.2, 2.5), - "total_return": rng.uniform(-20, 80), - "max_drawdown": rng.uniform(-30, -5), - "win_rate": rng.uniform(0.25, 0.70), - } - - def _generate_detailed_metrics( - self, symbol: str, strategy: str, start_date: str, end_date: str - ) -> dict: - """Generate detailed metrics for an asset/strategy combination.""" - rng = np.random.default_rng(hash(symbol + strategy) % 2147483647) - - # Generate realistic trading data - start = datetime.strptime(start_date, "%Y-%m-%d") - end = datetime.strptime(end_date, "%Y-%m-%d") - (end - start).days - - # Basic metrics - initial_equity = 10000 - total_return = rng.uniform(10, 50) # 10-50% - final_equity = initial_equity * (1 + total_return / 100) - - # Generate orders - num_orders = rng.integers(50, 500) - orders = 
self._generate_orders(symbol, start, end, num_orders, initial_equity) - - # Calculate metrics - return { - "overview": { - "PSR": rng.uniform(0.40, 0.95), - "sharpe_ratio": rng.uniform(0.2, 2.1), - "total_orders": num_orders, - "average_win": rng.uniform(15, 35), - "average_loss": rng.uniform(-8, -2), - "compounding_annual_return": total_return, - "drawdown": rng.uniform(-25, -5), - "expectancy": rng.uniform(0.5, 2.0), - "start_equity": initial_equity, - "end_equity": final_equity, - "net_profit": (final_equity - initial_equity) / initial_equity * 100, - "sortino_ratio": rng.uniform(0.2, 1.8), - "loss_rate": rng.uniform(0.4, 0.8), - "win_rate": rng.uniform(0.2, 0.6), - "profit_loss_ratio": rng.uniform(2, 8), - "alpha": rng.uniform(-0.1, 0.2), - "beta": rng.uniform(0.5, 2.0), - "annual_std": rng.uniform(0.15, 0.4), - "annual_variance": rng.uniform(0.02, 0.16), - "information_ratio": rng.uniform(0.1, 1.2), - "tracking_error": rng.uniform(0.1, 0.5), - "treynor_ratio": rng.uniform(0.02, 0.15), - "total_fees": rng.uniform(500, 5000), - "strategy_capacity": rng.uniform(100000, 5000000), - "lowest_capacity_asset": f"{symbol} R735QTJ8XC9X", - "portfolio_turnover": rng.uniform(0.3, 2.5), - }, - "orders": orders, - "equity_curve": self._generate_equity_curve( - start, end, initial_equity, final_equity - ), - "benchmark_curve": self._generate_benchmark_curve( - symbol, start, end, initial_equity - ), - "symbol": symbol, - "strategy": strategy, - } - - def _generate_detailed_metrics_with_timeframe( - self, symbol: str, strategy: str, timeframe: str, start_date: str, end_date: str - ) -> dict: - """Generate detailed metrics using real database data.""" - from sqlalchemy.orm import sessionmaker - - from src.core.direct_backtest import run_direct_backtest - from src.database.db_connection import get_sync_engine - from src.database.models import Trade - - # Get real backtest data - try: - result = run_direct_backtest( - symbol=symbol, - strategy_name=strategy, - 
start_date=start_date, - end_date=end_date, - timeframe=timeframe, - ) - - # Get real trades from database - engine = get_sync_engine() - Session = sessionmaker(bind=engine) - session = Session() - - trades = ( - session.query(Trade) - .filter_by(symbol=symbol, strategy=strategy, timeframe=timeframe) - .all() - ) - - # Convert trades to orders format for display - orders = [] - for trade in trades: - orders.append( - { - "date": trade.trade_datetime.strftime("%Y-%m-%d"), - "type": trade.side, - "price": float(trade.price), - "size": float(trade.size), - "equity": float(trade.equity_after or 0), - } - ) - - session.close() - - metrics = result["metrics"] - return { - "overview": { - "PSR": metrics.get("sortino_ratio", 0), - "sharpe_ratio": metrics.get("sharpe_ratio", 0), - "total_orders": len(orders), - "net_profit": metrics.get("total_return", 0) * 100, - "average_win": metrics.get("average_win", 0) * 100, - "average_loss": abs(metrics.get("average_loss", 0)) * 100, - "max_drawdown": abs(metrics.get("max_drawdown", 0)) * 100, - "win_rate": metrics.get("win_rate", 0) * 100, - "best_timeframe": timeframe, - }, - "orders": orders, - "equity_curve": result.get("equity_curve", []), - "benchmark_curve": result.get("benchmark_curve", []), - } - except Exception as e: - print(f"Error generating real metrics for {symbol}/{strategy}: {e}") - return { - "overview": { - "PSR": 0, - "sharpe_ratio": 0, - "total_orders": 0, - "net_profit": 0, - "average_win": 0, - "average_loss": 0, - "max_drawdown": 0, - "win_rate": 0, - "best_timeframe": timeframe, - }, - "orders": [], - "equity_curve": [], - "benchmark_curve": [], - } - - def _generate_orders( - self, - symbol: str, - start_date: datetime, - end_date: datetime, - num_orders: int, - initial_equity: float, - ) -> list[dict]: - """Generate realistic order data.""" - orders = [] - current_equity = initial_equity - current_holdings = 0 - - for i in range(num_orders): - # Random date within range - total_days = (end_date - 
start_date).days - random_days = self.rng.integers(0, int(total_days)) - order_date = start_date + timedelta(days=int(random_days)) - - # Order details - order_type = self.rng.choice( - ["buy", "sell"], p=[0.6, 0.4] if current_holdings == 0 else [0.3, 0.7] - ) - price = self.rng.uniform(50, 500) - - if order_type == "buy": - max_quantity = int(current_equity * 0.3 / price) # Max 30% of equity - quantity = self.rng.integers( - 1, max(2, max_quantity + 1) - ) # Ensure high > low - cost = quantity * price - fees = cost * 0.001 # 0.1% fees - current_equity -= cost + fees - current_holdings += quantity - else: - if current_holdings > 0: - quantity = self.rng.integers(1, current_holdings + 1) - revenue = quantity * price - fees = revenue * 0.001 - current_equity += revenue - fees - current_holdings -= quantity - else: - continue - - orders.append( - { - "datetime": order_date.strftime("%Y-%m-%d %H:%M:%S"), - "symbol": symbol, - "type": order_type.upper(), - "price": round(price, 2), - "quantity": quantity, - "status": "FILLED", - "tag": f"Strategy_{i % 5}", - "equity": round(current_equity, 2), - "fees": round(fees, 2), - "holdings": current_holdings, - "net_profit": round(current_equity - initial_equity, 2), - "unrealized": ( - round( - ( - current_holdings * price - - sum( - [ - o["quantity"] * o["price"] - for o in orders - if o["type"] == "BUY" - ] - ) - ), - 2, - ) - if current_holdings > 0 - else 0 - ), - "volume": quantity * price, - } - ) - - return sorted(orders, key=lambda x: x["datetime"]) - - def _generate_equity_curve( - self, - start_date: datetime, - end_date: datetime, - initial_equity: float, - final_equity: float, - ) -> list[dict]: - """Generate equity curve data.""" - days = (end_date - start_date).days - curve = [] - - # Generate smooth curve with some volatility - for i in range(days): - date = start_date + timedelta(days=i) - progress = i / days - - # Base growth with some random walk - base_value = initial_equity + (final_equity - initial_equity) 
* progress - noise = self.rng.normal(0, base_value * 0.02) # 2% daily volatility - value = max( - base_value + noise, initial_equity * 0.7 - ) # Don't go below 30% loss - - curve.append({"date": date.strftime("%Y-%m-%d"), "equity": round(value, 2)}) - - return curve - - def _generate_benchmark_curve( - self, - symbol: str, - start_date: datetime, - end_date: datetime, - initial_value: float, - ) -> list[dict]: - """Generate actual Buy & Hold benchmark curve data using real backtest.""" - from src.core.backtest_engine import BacktestConfig, UnifiedBacktestEngine - from src.core.cache_manager import UnifiedCacheManager - from src.core.data_manager import UnifiedDataManager - - try: - # Run actual Buy & Hold backtest for this symbol - data_manager = UnifiedDataManager() - cache_manager = UnifiedCacheManager() - engine = UnifiedBacktestEngine(data_manager, cache_manager) - - config = BacktestConfig( - symbols=[symbol], - strategies=["BuyAndHold"], - start_date=start_date.strftime("%Y-%m-%d"), - end_date=end_date.strftime("%Y-%m-%d"), - initial_capital=initial_value, - use_cache=True, - ) - - # Run the actual Buy & Hold backtest - result = engine.run_backtest(symbol, "BuyAndHold", config) - - # Convert backtest equity curve to benchmark format - if hasattr(result, "equity_curve") and result.equity_curve: - curve = [] - for date_str, value in result.equity_curve.items(): - curve.append( - {"date": date_str, "benchmark": round(float(value), 2)} - ) - return curve - - except Exception as e: - self.logger.warning( - "Failed to generate actual Buy & Hold benchmark for %s: %s", symbol, e - ) - - # Fallback to simple simulation if backtest fails - days = (end_date - start_date).days - curve = [] - - # Use conservative market returns as fallback - annual_return = 0.08 # 8% annual return (market average) - daily_return = annual_return / 365 - - for i in range(days): - date = start_date + timedelta(days=i) - value = initial_value * (1 + daily_return) ** i - curve.append( - 
{"date": date.strftime("%Y-%m-%d"), "benchmark": round(value, 2)} - ) - - return curve - - def _create_html_report( - self, portfolio_config: dict, assets_data: dict, start_date: str, end_date: str - ) -> str: - """Create comprehensive HTML report.""" - html = f""" - - - - Portfolio Analysis: {portfolio_config["name"]} - - - - -
    -
    -

    {portfolio_config["name"]}

    -

    Comprehensive Strategy Analysis • {start_date} to {end_date}

    -
    -""" - - # Generate content for each asset - for symbol, asset_info in assets_data.items(): - data = asset_info["data"] - strategy = asset_info["best_strategy"] - timeframe = asset_info.get("best_timeframe", "1d") - overview = data["overview"] - - # Build CSS classes for metrics - psr_class = "positive" if overview["PSR"] > 0.5 else "" - sharpe_class = "positive" if overview["sharpe_ratio"] > 1 else "" - profit_class = "positive" if overview["net_profit"] > 0 else "negative" - alpha_class = "positive" if overview["alpha"] > 0 else "negative" - sortino_class = "positive" if overview["sortino_ratio"] > 1 else "" - - # Extract long values - annual_return = overview["compounding_annual_return"] - best_timeframe = overview.get("best_timeframe", "1d") - - html += f""" -
    -
    -

    {symbol}

    -
    - Best: {strategy.replace("_", " ").title()} - ⏰ {timeframe} -
    -
    - -
    -
    -
    PSR
    -
    {overview["PSR"]:.3f}
    -
    -
    -
    Sharpe Ratio
    -
    {overview["sharpe_ratio"]:.3f}
    -
    -
    -
    Total Orders
    -
    {overview["total_orders"]:,}
    -
    -
    -
    Net Profit
    -
    {overview["net_profit"]:.2f}%
    -
    -
    -
    Average Win
    -
    {overview["average_win"]:.2f}%
    -
    -
    -
    Average Loss
    -
    {overview["average_loss"]:.2f}%
    -
    -
    -
    Annual Return
    -
    {annual_return:.2f}%
    -
    -
    -
    Max Drawdown
    -
    {overview["drawdown"]:.2f}%
    -
    -
    -
    Win Rate
    -
    {overview["win_rate"]:.1f}%
    -
    -
    -
    Profit/Loss Ratio
    -
    {overview["profit_loss_ratio"]:.2f}
    -
    -
    -
    Alpha
    -
    {overview["alpha"]:.3f}
    -
    -
    -
    Beta
    -
    {overview["beta"]:.3f}
    -
    -
    -
    Sortino Ratio
    -
    {overview["sortino_ratio"]:.3f}
    -
    -
    -
    Total Fees
    -
    ${overview["total_fees"]:,.2f}
    -
    -
    -
    Strategy Capacity
    -
    ${overview["strategy_capacity"]:,.0f}
    -
    -
    -
    Portfolio Turnover
    -
    {overview["portfolio_turnover"]:.2f}%
    -
    -
    -
    Best Timeframe
    -
    {best_timeframe}
    -
    -
    -
    Combination Rank
    -
    {overview.get("combination_rank", "1/1")}
    -
    -
    - -
    -
    - - - -
    - -
    -
    -
    - -
    -
    -

    Strategy + Timeframe Combinations Analysis

    - - - - - - - - - - - - - - -""" - - # Add timeframe analysis rows if available - if "timeframe_analysis" in data: - sorted_combos = sorted( - data["timeframe_analysis"], key=lambda x: x["score"], reverse=True - ) - for i, combo in enumerate(sorted_combos[:20], 1): # Show top 20 - is_best = ( - combo["strategy"] == strategy - and combo["timeframe"] == timeframe - ) - status_badge = "🏆 BEST" if is_best else "" - row_class = "summary-row" if is_best else "" - - html += f""" - - - - - - - - - - -""" - - html += """ - -
    RankStrategyTimeframeSharpe RatioTotal ReturnMax DrawdownWin RateStatus
    {i}{combo["strategy"].replace("_", " ").title()}{combo["timeframe"]}{combo["score"]:.3f}{combo["metrics"]["total_return"]:.1f}%{combo["metrics"]["max_drawdown"]:.1f}%{combo["metrics"]["win_rate"]:.1f}%{status_badge}
    -
    -
    - -
    -
    - - - - - - - - - - - - - - - -""" - - # Add order rows (show last 50 to keep size reasonable) - recent_orders = ( - data["orders"][-50:] if len(data["orders"]) > 50 else data["orders"] - ) - for order in recent_orders: - order_type_class = "buy" if order["type"] == "BUY" else "sell" - profit_class = "positive" if order["net_profit"] > 0 else "negative" - - html += f""" - - - - - - - - - - - -""" - - # Add summary row - total_fees = sum(order["fees"] for order in data["orders"]) - final_equity = ( - data["orders"][-1]["equity"] - if data["orders"] - else overview["start_equity"] - ) - - html += f""" - - - - - - - - - -
    Date/TimeTypePriceQuantityEquityFeesHoldingsNet ProfitUnrealized
    {order["datetime"]}{order["type"]}${order["price"]:.2f}{order["quantity"]:,}${order["equity"]:,.2f}${order["fees"]:.2f}{order["holdings"]:,}${order["net_profit"]:,.2f}${order["unrealized"]:,.2f}
    SUMMARY ({len(data["orders"])} total orders)${final_equity:,.2f}${total_fees:.2f}-${overview["net_profit"]:,.2f}%-
    -
    -
    -
    -
    -""" - - # Add JavaScript for charts and interactivity - html += """ -
    - - - -""" - - return html - - def _save_compressed_report(self, html_content: str, portfolio_name: str) -> str: - """Save HTML report with quarterly organization and compression.""" - # Create temporary file first - reports_dir = Path("exports/reports") - reports_dir.mkdir(exist_ok=True) - - # Generate temporary filename - timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") - temp_filename = ( - f"portfolio_report_{portfolio_name.replace(' ', '_')}_{timestamp}.html" - ) - temp_filepath = reports_dir / temp_filename - - # Save temporary HTML file - with temp_filepath.open("w", encoding="utf-8") as f: - f.write(html_content) - - # Organize into quarterly structure (this will handle overriding existing reports) - organized_path = self.report_organizer.organize_report( - str(temp_filepath), portfolio_name, datetime.now(timezone.utc) - ) - - # Remove temporary file - temp_filepath.unlink() - - # Save compressed version alongside organized report - with gzip.open( - organized_path.with_suffix(".html.gz"), "wt", encoding="utf-8" - ) as f: - f.write(html_content) - - # Return path to organized HTML file - return str(organized_path) diff --git a/src/utils/report_organizer.py b/src/reporting/report_organizer.py similarity index 82% rename from src/utils/report_organizer.py rename to src/reporting/report_organizer.py index fd4cbac..5c21e4a 100644 --- a/src/utils/report_organizer.py +++ b/src/reporting/report_organizer.py @@ -34,7 +34,11 @@ def get_portfolio_name_from_filename(self, filename: str) -> str | None: return None def organize_report( - self, report_path: str, portfolio_name: str, report_date: datetime | None = None + self, + report_path: str, + portfolio_name: str, + report_date: datetime | None = None, + interval: str | None = None, ) -> Path: """ Organize a report into quarterly structure. 
@@ -56,9 +60,12 @@ def organize_report( # Clean portfolio name for filename clean_portfolio_name = portfolio_name.replace(" ", "_").replace("/", "_") + interval_part = (interval or "multi").replace("/", "-") - # New filename format: {portfolio_name}_Q{quarter}_{year}.html - new_filename = f"{clean_portfolio_name}_Q{quarter}_{year}.html" + # Unified filename format: _Collection___.html + new_filename = ( + f"{clean_portfolio_name}_Collection_{year}_Q{quarter}_{interval_part}.html" + ) target_path = quarterly_dir / new_filename # Check if report already exists for this portfolio/quarter @@ -107,8 +114,10 @@ def organize_existing_reports(self) -> None: # If parsing fails, use current date report_date = datetime.now(timezone.utc) - # Organize the report - self.organize_report(str(report_file), portfolio_name, report_date) + # Organize the report (no interval info, use 'multi') + self.organize_report( + str(report_file), portfolio_name, report_date, interval="multi" + ) # Remove original file after organizing report_file.unlink() @@ -130,10 +139,26 @@ def get_latest_report(self, portfolio_name: str) -> Path | None: if year_dir.is_dir(): for quarter_dir in year_dir.glob("Q?"): if quarter_dir.is_dir(): + # Prefer unified naming with interval; fallback to legacy report_path = ( quarter_dir - / f"{clean_portfolio_name}_Q{quarter_dir.name[1]}_{year_dir.name}.html" + / f"{clean_portfolio_name}_Collection_{year_dir.name}_{quarter_dir.name}_1d.html" ) + if not report_path.exists(): + # Try any interval by globbing + candidates = list( + quarter_dir.glob( + f"{clean_portfolio_name}_Collection_{year_dir.name}_{quarter_dir.name}_*.html" + ) + ) + if candidates: + report_path = candidates[0] + else: + # Legacy fallback + report_path = ( + quarter_dir + / f"{clean_portfolio_name}_Q{quarter_dir.name[1]}_{year_dir.name}.html" + ) if report_path.exists(): year = int(year_dir.name) quarter = int(quarter_dir.name[1]) diff --git a/src/reporting/tailwind.input.css 
b/src/reporting/tailwind.input.css new file mode 100644 index 0000000..adc9d8b --- /dev/null +++ b/src/reporting/tailwind.input.css @@ -0,0 +1,8 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; + +/* Optional: custom utility tweaks for cards/tables */ +.card { @apply rounded-xl border border-white/10 bg-white/5 backdrop-blur; } +.card-header { @apply px-5 py-4; } +.card-body { @apply p-5; } diff --git a/src/utils/raw_data_csv_exporter.py b/src/utils/csv_exporter.py similarity index 80% rename from src/utils/raw_data_csv_exporter.py rename to src/utils/csv_exporter.py index 6fad5bc..5e66642 100644 --- a/src/utils/raw_data_csv_exporter.py +++ b/src/utils/csv_exporter.py @@ -30,7 +30,8 @@ class RawDataCSVExporter: - Integration with existing quarterly report structure """ - def __init__(self, output_dir: str = "exports/data_exports"): + def __init__(self, output_dir: str = "exports/csv"): + # Default output directory aligned with repo: exports/csv self.output_dir = Path(output_dir) self.output_dir.mkdir(parents=True, exist_ok=True) self.reports_dir = Path("exports/reports") @@ -44,6 +45,7 @@ def export_from_database_primary( export_format: str = "full", portfolio_name: str = "all", portfolio_path: str | None = None, + interval: str | None = None, ) -> list[str]: """ Export data directly from database - primary data source for CSV exports. @@ -88,14 +90,59 @@ def export_from_database_primary( except Exception as e: self.logger.warning("Could not load portfolio config: %s", e) - # Query best strategies from database with optional symbol filtering + # Query best strategies from database with optional symbol filtering. + # Primary canonical table is backtests.best_strategies (models.BestStrategy). + # If that is empty (e.g., legacy or different persistence layer), fall back + # to the lightweight unified_models BestStrategy table (unified_models.BestStrategy). 
query = db_session.query(BestStrategy) if portfolio_symbols: query = query.filter(BestStrategy.symbol.in_(portfolio_symbols)) + # Filter by timeframe/interval if provided + if "interval" in locals() and interval: + try: + query = query.filter(BestStrategy.timeframe == interval) + except Exception: + pass best_strategies = query.all() + # Fallback to unified_models if no rows found in canonical backtests schema + if not best_strategies: + try: + from src.database import unified_models + + sess2 = unified_models.Session() + try: + uq = sess2.query(unified_models.BestStrategy) + if portfolio_symbols: + uq = uq.filter( + unified_models.BestStrategy.symbol.in_( + portfolio_symbols + ) + ) + if "interval" in locals() and interval: + try: + uq = uq.filter( + unified_models.BestStrategy.timeframe == interval + ) + except Exception: + pass + unified_rows = uq.all() + if unified_rows: + # Map unified_models rows into a structure compatible with the rest of this function. + # unified_models.BestStrategy has attributes with same names used below (symbol, timeframe, strategy, sortino_ratio, calmar_ratio, sharpe_ratio, total_return, max_drawdown, updated_at) + best_strategies = unified_rows + self.logger.info( + "Fell back to unified_models BestStrategy table (%d rows)", + len(best_strategies), + ) + finally: + sess2.close() + except Exception: + # If fallback fails, continue with empty list to trigger no-data path below + pass + if not best_strategies: self.logger.warning( "No strategies found in database for specified filters" @@ -132,18 +179,37 @@ def export_from_database_primary( df = pd.DataFrame(data) # Create output directory following standard naming convention - csv_output_dir = Path("exports/csv") / year / quarter + csv_output_dir = self.output_dir / year / quarter csv_output_dir.mkdir(parents=True, exist_ok=True) # Generate filename following naming convention + # Prefer human-readable collection name from config when available + display_name = portfolio_name or 
"All_Collections" + if portfolio_path: + try: + import json + + with Path(portfolio_path).open() as f: + portfolio_config = json.load(f) + portfolio_key = list(portfolio_config.keys())[0] + display_name = ( + portfolio_config[portfolio_key].get("name") or display_name + ) + except Exception: + pass + + # Sanitize and build unified base filename: _Collection___ + sanitized = re.sub(r"\W+", "_", str(display_name)).strip("_") + safe_interval = (interval or "multi").replace("/", "-") if output_filename: base_filename = output_filename.replace(".csv", "") else: - # Use actual collection name from portfolio, not generic names - base_filename = f"{portfolio_name}_collection" + base_filename = ( + f"{sanitized}_Collection_{year}_{quarter}_{safe_interval}" + ) if export_format == "best-strategies": - filename = f"{base_filename}_best_strategies_{quarter}_{year}.csv" + filename = f"{base_filename}.csv" # Keep only one row per symbol with highest Sortino ratio df = ( df.sort_values("Sortino_Ratio", ascending=False) @@ -164,7 +230,7 @@ def export_from_database_primary( } ) elif export_format == "quarterly": - filename = f"{base_filename}_quarterly_summary_{quarter}_{year}.csv" + filename = f"{base_filename}.csv" # Create summary statistics summary_data = [] for symbol in df["Symbol"].unique(): @@ -185,7 +251,7 @@ def export_from_database_primary( ) df = pd.DataFrame(summary_data) else: # full - filename = f"{base_filename}_detailed_results_{quarter}_{year}.csv" + filename = f"{base_filename}.csv" # Keep all data with proper column names df = df.rename( columns={ @@ -218,6 +284,8 @@ def export_from_quarterly_reports( year: str, output_filename: str | None = None, export_format: str = "full", + collection_name: str | None = None, + interval: str | None = None, ) -> list[str]: """ Extract data from existing quarterly reports and export to CSV. 
@@ -248,7 +316,7 @@ def export_from_quarterly_reports( "Found %d HTML reports for %s %s", len(html_files), quarter, year ) - # Create quarterly directory structure + # Create quarterly directory structure under exports/csv quarterly_dir = self.output_dir / year / quarter quarterly_dir.mkdir(parents=True, exist_ok=True) @@ -266,12 +334,15 @@ def export_from_quarterly_reports( # Convert to DataFrame df = pd.DataFrame(extracted_data) - # Generate CSV filename based on HTML filename - csv_filename = html_file.stem + ".csv" # Remove .html and add .csv - - # Override with custom filename if provided and only one file - if output_filename and len(html_files) == 1: - csv_filename = output_filename + # Build unified filename + name_for_file = collection_name or html_file.stem + sanitized = re.sub(r"\W+", "_", str(name_for_file)).strip("_") + safe_interval = (interval or "multi").replace("/", "-") + csv_filename = ( + output_filename + if output_filename and len(html_files) == 1 + else f"{sanitized}_Collection_{year}_{quarter}_{safe_interval}.csv" + ) # Process based on format if export_format == "best-strategies": @@ -519,7 +590,11 @@ def _parse_metric_card(self, card) -> dict[str, Any] | None: return None def _export_from_database( - self, quarter: str, year: str, export_format: str = "full" + self, + quarter: str, + year: str, + export_format: str = "full", + interval: str | None = None, ) -> list[str]: """ Export data directly from database when HTML reports have no data. 
@@ -538,7 +613,13 @@ def _export_from_database( db_session = get_db_session() # Query all best strategies from database - best_strategies = db_session.query(BestStrategy).all() + q = db_session.query(BestStrategy) + if "interval" in locals() and interval: + try: + q = q.filter(BestStrategy.timeframe == interval) + except Exception: + pass + best_strategies = q.all() if not best_strategies: self.logger.warning("No strategies found in database") @@ -568,12 +649,15 @@ def _export_from_database( df = pd.DataFrame(data) # Create output directory - csv_output_dir = Path("exports/csv") / year / quarter + csv_output_dir = self.output_dir / year / quarter csv_output_dir.mkdir(parents=True, exist_ok=True) - # Generate filename + # Generate filename (fallback method) using unified convention + safe_interval = (interval or "multi").replace("/", "-") + filename = ( + f"All_Collections_Collection_{year}_{quarter}_{safe_interval}.csv" + ) if export_format == "best-strategies": - filename = f"database_best_strategies_{quarter}_{year}.csv" # Keep only one row per symbol with highest Sortino ratio df = ( df.sort_values("Sortino_Ratio", ascending=False) @@ -582,9 +666,9 @@ def _export_from_database( .reset_index() ) elif export_format == "quarterly": - filename = f"database_quarterly_summary_{quarter}_{year}.csv" + pass else: # full - filename = f"database_all_strategies_{quarter}_{year}.csv" + pass output_file = csv_output_dir / filename diff --git a/src/utils/simple_csv_exporter.py b/src/utils/simple_csv_exporter.py deleted file mode 100644 index 20cf276..0000000 --- a/src/utils/simple_csv_exporter.py +++ /dev/null @@ -1,199 +0,0 @@ -""" -Simple CSV Exporter - Direct Backtesting Library Data -Exports real performance data from database to CSV format. 
-""" - -from __future__ import annotations - -import logging -from datetime import datetime -from pathlib import Path -from typing import Optional - -import pandas as pd - -from src.database import get_db_session -from src.database.models import BacktestResult, BestStrategy, Trade - - -class SimpleCSVExporter: - """Export real backtesting data to CSV format.""" - - def __init__(self, output_dir: str = "exports/csv"): - self.logger = logging.getLogger(__name__) - self.output_dir = Path(output_dir) - self.output_dir.mkdir(parents=True, exist_ok=True) - - def export_best_strategies(self, filename: Optional[str] = None) -> str: - """Export best strategies with real backtesting library data to CSV.""" - if not filename: - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"best_strategies_real_data_{timestamp}.csv" - - session = get_db_session() - - try: - # Get all best strategies from database - best_strategies = ( - session.query(BestStrategy) - .order_by(BestStrategy.sortino_ratio.desc()) - .all() - ) - - self.logger.info( - "Exporting %d best strategies to CSV", len(best_strategies) - ) - - # Convert to CSV format - csv_data = [] - for bs in best_strategies: - csv_data.append( - { - "Symbol": bs.symbol, - "Best_Strategy": bs.strategy, - "Timeframe": bs.timeframe, - "Sortino_Ratio": float(bs.sortino_ratio or 0), - "Sharpe_Ratio": float(bs.sharpe_ratio or 0), - "Total_Return_Percent": float(bs.total_return or 0), - "Max_Drawdown_Percent": float(bs.max_drawdown or 0), - "Calmar_Ratio": float(bs.calmar_ratio or 0), - "Data_Source": "backtesting_library_real_data", - "Last_Updated": bs.updated_at.strftime("%Y-%m-%d %H:%M:%S") - if bs.updated_at - else "", - } - ) - - # Create DataFrame and save - df = pd.DataFrame(csv_data) - output_path = self.output_dir / filename - - df.to_csv(output_path, index=False) - - self.logger.info("CSV export completed: %s", output_path) - return str(output_path) - - except Exception as e: - self.logger.error("CSV export 
failed: %s", e) - raise e - finally: - session.close() - - def export_detailed_results( - self, symbol: Optional[str] = None, filename: Optional[str] = None - ) -> str: - """Export detailed backtest results to CSV.""" - if not filename: - symbol_suffix = f"_{symbol}" if symbol else "_all" - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"backtest_results{symbol_suffix}_{timestamp}.csv" - - session = get_db_session() - - try: - query = session.query(BacktestResult) - if symbol: - query = query.filter(BacktestResult.symbols.any(symbol)) - - results = query.order_by(BacktestResult.sortino_ratio.desc()).all() - - self.logger.info("Exporting %d detailed results to CSV", len(results)) - - csv_data = [] - for result in results: - symbols_str = ",".join(result.symbols) if result.symbols else "" - csv_data.append( - { - "Symbols": symbols_str, - "Strategy": result.strategy, - "Timeframe": result.timeframe, - "Start_Date": result.start_date.strftime("%Y-%m-%d") - if result.start_date - else "", - "End_Date": result.end_date.strftime("%Y-%m-%d") - if result.end_date - else "", - "Initial_Capital": float(result.initial_capital or 0), - "Final_Value": float(result.final_value or 0), - "Total_Return_Percent": float(result.total_return or 0), - "Sortino_Ratio": float(result.sortino_ratio or 0), - "Sharpe_Ratio": float(result.sharpe_ratio or 0), - "Calmar_Ratio": float(result.calmar_ratio or 0), - "Max_Drawdown_Percent": float(result.max_drawdown or 0), - "Volatility_Percent": float(result.volatility or 0), - "Win_Rate_Percent": float(result.win_rate or 0), - "Number_of_Trades": int(result.trades_count or 0), - "Profit_Factor": float(result.profit_factor or 1), - "Data_Source": "backtesting_library_direct", - "Created_At": result.created_at.strftime("%Y-%m-%d %H:%M:%S") - if result.created_at - else "", - } - ) - - df = pd.DataFrame(csv_data) - output_path = self.output_dir / filename - - df.to_csv(output_path, index=False) - - self.logger.info("Detailed CSV 
export completed: %s", output_path) - return str(output_path) - - except Exception as e: - self.logger.error("Detailed CSV export failed: %s", e) - raise e - finally: - session.close() - - def export_trades( - self, symbol: Optional[str] = None, filename: Optional[str] = None - ) -> str: - """Export real trade data to CSV.""" - if not filename: - symbol_suffix = f"_{symbol}" if symbol else "_all" - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"real_trades{symbol_suffix}_{timestamp}.csv" - - session = get_db_session() - - try: - query = session.query(Trade) - if symbol: - query = query.filter_by(symbol=symbol) - - trades = query.order_by(Trade.trade_datetime.asc()).all() - - self.logger.info("Exporting %d real trades to CSV", len(trades)) - - csv_data = [] - for trade in trades: - csv_data.append( - { - "Symbol": trade.symbol, - "Strategy": trade.strategy, - "Timeframe": trade.timeframe, - "Trade_DateTime": trade.trade_datetime.strftime( - "%Y-%m-%d %H:%M:%S" - ), - "Trade_Type": trade.side, - "Price": float(trade.price), - "Size": float(trade.size), - "Equity_Before": float(trade.equity_before or 0), - "Equity_After": float(trade.equity_after or 0), - "Data_Source": "backtesting_library_real_trades", - } - ) - - df = pd.DataFrame(csv_data) - output_path = self.output_dir / filename - - df.to_csv(output_path, index=False) - - self.logger.info("Trades CSV export completed: %s", output_path) - return str(output_path) - - except Exception as e: - self.logger.error("Trades CSV export failed: %s", e) - raise e - finally: - session.close() diff --git a/src/utils/trades_parser.py b/src/utils/trades_parser.py new file mode 100644 index 0000000..5fdd92e --- /dev/null +++ b/src/utils/trades_parser.py @@ -0,0 +1,275 @@ +""" +Trades parser: parse a stringified pandas DataFrame (or JSON/CSV-like text) returned by the backtest +engine and normalize into a list of dicts suitable for insertion into the DB. 
+ +Strategy: +- Try json.loads first (some engines return JSON) +- Try pandas (if available) to read JSON/CSV +- Try csv.Sniffer with common delimiters +- Last-resort: whitespace heuristic splitting on two-or-more spaces (pandas pretty-print) +- Normalize common column names to a canonical set + +Returns a list of dicts with keys such as: + trade_index, size, entry_bar, exit_bar, entry_price, exit_price, pnl, duration, tag, + entry_signals, exit_signals +""" + +from __future__ import annotations + +import csv +import io +import json +import re +from typing import Any, Dict, List, Optional + +CANONICAL_COLUMNS = { + # various possible column names mapped to canonical names + "index": "trade_index", + "trade_index": "trade_index", + "size": "size", + "qty": "size", + "quantity": "size", + "entry_bar": "entry_bar", + "entrybar": "entry_bar", + "entry": "entry_bar", + "entry_index": "entry_bar", + # entry/exit timestamps + "entry_time": "entry_time", + "entrytime": "entry_time", + "entry timestamp": "entry_time", + "entry_ts": "entry_time", + "entry_date": "entry_time", + "exit_bar": "exit_bar", + "exitbar": "exit_bar", + "exit": "exit_bar", + "exit_index": "exit_bar", + "exit_time": "exit_time", + "exittime": "exit_time", + "exit timestamp": "exit_time", + "exit_ts": "exit_time", + "exit_date": "exit_time", + "entry_price": "entry_price", + "entryprice": "entry_price", + "exit_price": "exit_price", + "exitprice": "exit_price", + "pnl": "pnl", + "profit": "pnl", + "pl": "pnl", + "duration": "duration", + "tag": "tag", + "entry_signals": "entry_signals", + "exit_signals": "exit_signals", + "signals": "entry_signals", +} + + +def _normalize_row(raw: Dict[Any, Any], idx: Optional[int] = None) -> Dict[str, Any]: + normalized: Dict[str, Any] = {} + # accept any-hashable keys and coerce to str for normalization + raw = {str(k): v for k, v in raw.items()} + # lower-case keys for matching + mapping = {str(k).lower().strip(): v for k, v in raw.items()} + # map known names + for 
k, v in mapping.items(): + canon = CANONICAL_COLUMNS.get(k) + if canon: + normalized[canon] = v + else: + # keep unknown columns as-is (but lowercased) + normalized[k] = v + # ensure trade_index present + if "trade_index" not in normalized: + if idx is not None: + try: + normalized["trade_index"] = int(idx) + except Exception: + normalized["trade_index"] = idx or 0 + else: + # try to extract numeric 'index' or fallback to 0 + normalized.setdefault("trade_index", 0) + return normalized + + +def _parse_with_pandas(text: str) -> Optional[List[Dict[str, Any]]]: + try: + import pandas as pd # type: ignore[import-not-found] + from pandas.errors import EmptyDataError # type: ignore[import-not-found] + except Exception: + return None + + # Try read_json (records) then read_csv + try: + df = pd.read_json(io.StringIO(text), orient="records") + if df is not None and not df.empty: + records = df.to_dict(orient="records") + return [_normalize_row(dict(r), idx=i) for i, r in enumerate(records)] + except Exception: + pass + + try: + df = pd.read_csv(io.StringIO(text)) + if df is not None and not df.empty: + records = df.to_dict(orient="records") + return [_normalize_row(dict(r), idx=i) for i, r in enumerate(records)] + except EmptyDataError: + return [] + except Exception: + # Try python engine with whitespace delimiter heuristics + try: + df = pd.read_csv(io.StringIO(text), sep=r"\s{2,}", engine="python") + if df is not None and not df.empty: + records = df.to_dict(orient="records") + return [_normalize_row(dict(r), idx=i) for i, r in enumerate(records)] + except Exception: + return None + return None + + +def _parse_with_csv(text: str) -> Optional[List[Dict[str, Any]]]: + sio = io.StringIO(text) + # sniff delimiter + try: + sample = text[:4096] + dialect = csv.Sniffer().sniff(sample, delimiters=",;\t|") + sio.seek(0) + reader = csv.DictReader(sio, dialect=dialect) + rows = [dict(r) for r in reader] + if rows: + return [_normalize_row(r, idx=i) for i, r in enumerate(rows)] + 
except Exception: + pass + + # fallback: comma + try: + sio.seek(0) + reader = csv.DictReader(sio) + rows = [dict(r) for r in reader] + if rows: + return [_normalize_row(r, idx=i) for i, r in enumerate(rows)] + except Exception: + pass + + return None + + +def _parse_whitespace_table(text: str) -> Optional[List[Dict[str, Any]]]: + """ + Parses pretty-printed pandas DataFrame which separates columns by two or more spaces. + Example: + index entry_bar exit_bar entry_price exit_price pnl + 0 100 120 10.5 12.0 1.5 + """ + lines = [ln.rstrip() for ln in text.splitlines() if ln.strip()] + if not lines: + return [] + # header detection: first line with word chars and spaces + header = lines[0] + # split on 2+ spaces + cols = re.split(r"\s{2,}", header.strip()) + if len(cols) < 2: + return None + data = [] + for ln in lines[1:]: + parts = re.split(r"\s{2,}", ln.strip()) + if len(parts) != len(cols): + # if mismatch, skip or try to pad + continue + row = dict(zip(cols, parts)) + data.append(row) + if not data: + return None + return [_normalize_row(r, idx=i) for i, r in enumerate(data)] + + +def parse_trades_from_string(trades_str: Optional[str]) -> List[Dict[str, Any]]: + """ + Public parser. Returns an empty list for falsy input. + + Steps: + - Try json.loads + - Try pandas-based parser + - Try csv.Sniffer-based parser + - Try whitespace table parser + - Fallback: return empty list + """ + if not trades_str: + return [] + + text = trades_str.strip() + + # 1) JSON + try: + obj = json.loads(text) + # If a dict representing a DF: convert to list + if isinstance(obj, dict): + # dict-of-lists or dict-of-dicts? 
try to convert to records + # common format: {"0": {...}, "1": {...}} or {"col": [..]} + if all(isinstance(v, list) for v in obj.values()): + # dict of columns -> convert to records + keys = list(obj.keys()) + length = len(next(iter(obj.values()), [])) + records = [] + for i in range(length): + rec = {k: obj[k][i] for k in keys} + records.append(rec) + return [_normalize_row(r, idx=i) for i, r in enumerate(records)] + # dict of records + if all(isinstance(v, dict) for v in obj.values()): + records = list(obj.values()) + return [_normalize_row(r, idx=i) for i, r in enumerate(records)] + # single record + return [_normalize_row(obj, idx=0)] + if isinstance(obj, list): + return [ + _normalize_row(r if isinstance(r, dict) else {"value": r}, idx=i) + for i, r in enumerate(obj) + ] + except Exception: + pass + + # 2) pandas + try: + pd_res = _parse_with_pandas(text) + if pd_res is not None: + return pd_res + except Exception: + pass + + # 3) csv + try: + csv_res = _parse_with_csv(text) + if csv_res is not None: + return csv_res + except Exception: + pass + + # 4) whitespace table + try: + ws = _parse_whitespace_table(text) + if ws is not None: + return ws + except Exception: + pass + + # 5) Last resort: try splitting lines and commas + lines = [ln for ln in text.splitlines() if ln.strip()] + if len(lines) == 1: + # single-line value + return [{"trade_index": 0, "value": lines[0]}] + # multiple lines: try simple CSV split + header = lines[0] + cols = [c.strip() for c in re.split(r"[,\t;|]+", header) if c.strip()] + if len(cols) >= 2: + data = [] + for i, ln in enumerate(lines[1:]): + parts = [p.strip() for p in re.split(r"[,\t;|]+", ln) if p.strip()][ + : len(cols) + ] + if len(parts) != len(cols): + continue + row = dict(zip(cols, parts)) + data.append(row) + if data: + return [_normalize_row(r, idx=i) for i, r in enumerate(data)] + # If nothing worked, return empty list + return [] diff --git a/src/utils/tradingview_alert_exporter.py b/src/utils/tv_alert_exporter.py 
similarity index 50% rename from src/utils/tradingview_alert_exporter.py rename to src/utils/tv_alert_exporter.py index b554aee..664b3ca 100644 --- a/src/utils/tradingview_alert_exporter.py +++ b/src/utils/tv_alert_exporter.py @@ -12,10 +12,20 @@ import os from datetime import datetime, timezone from pathlib import Path -from typing import Dict, List +from typing import Dict, List, Optional from bs4 import BeautifulSoup +# DB models +try: + from src.database.db_connection import ( + get_db_session, # type: ignore[import-not-found] + ) + from src.database.models import BestStrategy # type: ignore[import-not-found] +except Exception: # pragma: no cover - guarded imports + get_db_session = None # type: ignore[assignment] + BestStrategy = None # type: ignore[assignment] + class TradingViewAlertExporter: def __init__(self, reports_dir: str = "exports/reports"): @@ -45,35 +55,67 @@ def organize_output_path(self, base_dir: str) -> Path: return output_dir + def _build_filename( + self, collection_name: str, year: int, quarter: int, interval: str | None + ) -> str: + """Builds _Collection___.md""" + sanitized = ( + collection_name.replace(" ", "_").replace("/", "_").strip("_") + or "All_Collections" + ) + interval_part = (interval or "multi").replace("/", "-") + return f"{sanitized}_Collection_{year}_Q{quarter}_{interval_part}.md" + def extract_asset_data(self, html_content: str) -> List[Dict]: """Extract asset information from HTML report""" soup = BeautifulSoup(html_content, "html.parser") - assets = [] + assets: List[Dict] = [] - # Find all asset sections - asset_sections = soup.find_all("div", class_="asset-section") + # New Tailwind report structure (DetailedPortfolioReporter): sections with id="asset-" + section_nodes = soup.select("section[id^='asset-']") + for sec in section_nodes: + h2 = sec.find("h2") + symbol = h2.get_text(strip=True) if h2 else None + best_strategy = None + timeframe = None + # The header line contains two spans: "Best: " and "⏰ " + tag_spans = 
sec.find_all("span") + for sp in tag_spans: + txt = sp.get_text(strip=True) + if txt.startswith("Best:") and best_strategy is None: + best_strategy = txt.replace("Best:", "").strip() + if "⏰" in txt and timeframe is None: + timeframe = txt.replace("⏰", "").strip() + if symbol and best_strategy and timeframe: + assets.append( + { + "symbol": symbol, + "strategy": best_strategy, + "timeframe": timeframe, + "metrics": {}, + } + ) + + if assets: + return assets + # Fallback legacy structure support (older HTML reports) + legacy_assets: List[Dict] = [] + asset_sections = soup.find_all("div", class_="asset-section") for section in asset_sections: - # Extract asset symbol from title asset_title = section.find("h2", class_="asset-title") if not asset_title: continue - symbol = asset_title.text.strip() - - # Extract best strategy from strategy badge strategy_badges = section.find_all("span", class_="strategy-badge") best_strategy = None timeframe = None - for badge in strategy_badges: text = badge.text.strip() if text.startswith("Best:"): best_strategy = text.replace("Best:", "").strip() elif "⏰" in text: timeframe = text.replace("⏰", "").strip() - - # Extract metrics for additional context metrics = {} metric_cards = section.find_all("div", class_="metric-card") for card in metric_cards: @@ -83,9 +125,8 @@ def extract_asset_data(self, html_content: str) -> List[Dict]: label = label_elem.text.strip() value = value_elem.text.strip() metrics[label] = value - if symbol and best_strategy and timeframe: - assets.append( + legacy_assets.append( { "symbol": symbol, "strategy": best_strategy, @@ -93,8 +134,7 @@ def extract_asset_data(self, html_content: str) -> List[Dict]: "metrics": metrics, } ) - - return assets + return legacy_assets def generate_tradingview_alert(self, asset_data: Dict) -> str: """Generate TradingView alert message for asset""" @@ -151,49 +191,127 @@ def find_html_reports(self) -> List[Path]: html_files.append(Path(root) / file) return html_files - def 
export_alerts(self, output_file: str | None = None) -> Dict: - """Export all TradingView alerts""" - html_files = self.find_html_reports() - all_alerts = {} - - for html_file in html_files: - print(f"Processing: {html_file}") - assets = self.process_html_file(html_file) - - for asset in assets: - symbol = asset["symbol"] - alert = self.generate_tradingview_alert(asset) - - if symbol not in all_alerts: - all_alerts[symbol] = [] - - all_alerts[symbol].append({"alert_message": alert, "asset_data": asset}) - - # Write to file if specified - if output_file: - # Always organize by quarter/year following export naming convention - organized_dir = self.organize_output_path("exports/tradingview_alerts") - - # Generate proper filename based on collection and quarter/year - if output_file.endswith(".md"): - filename = output_file + def export_alerts( + self, + output_file: Optional[str] = None, + collection_filter: Optional[str] = None, + interval: Optional[str] = None, + symbols: Optional[List[str]] = None, + ) -> Dict: + """Export TradingView alerts using database BestStrategy data. + + - Filters by provided symbols when available (preferred). + - If symbols is None, uses all BestStrategy rows. + - Writes markdown under exports/tv_alerts//Q/ with unified name. 
+ """ + all_alerts: Dict[str, List[Dict]] = {} + + # Query DB for best strategies + rows = [] + sess = None + try: + if get_db_session is None or BestStrategy is None: + raise RuntimeError("Database session/models unavailable for TV export") + sess = get_db_session() + q = sess.query(BestStrategy) + if symbols: + q = q.filter(BestStrategy.symbol.in_(symbols)) + # Optionally, prefer the provided interval if filtering is desired + if interval: + q = q.filter(BestStrategy.timeframe == interval) + rows = q.all() + # Fallback to unified_models if no rows found (similar to csv exporter) + if not rows: + try: + from src.database import ( + unified_models as um, # type: ignore[import-not-found] + ) + + usess = um.Session() + try: + uq = usess.query(um.BestStrategy) + if symbols: + uq = uq.filter(um.BestStrategy.symbol.in_(symbols)) + if interval: + uq = uq.filter(um.BestStrategy.timeframe == interval) + rows = uq.all() + finally: + usess.close() + except Exception: + rows = [] + finally: + if sess is not None: + try: + sess.close() + except Exception: + pass + + # If interval specified but produced no rows, relax interval filter + if interval and not rows: + try: + sess = get_db_session() + q = sess.query(BestStrategy) + if symbols: + q = q.filter(BestStrategy.symbol.in_(symbols)) + rows = q.all() + except Exception: + rows = rows + finally: + if sess is not None: + try: + sess.close() + except Exception: + pass + + # Build one alert per symbol using best Sortino + by_symbol: Dict[str, Dict] = {} + for r in rows: + sym = getattr(r, "symbol", None) + if not sym: + continue + entry = by_symbol.get(sym) + sortino = float(getattr(r, "sortino_ratio", 0.0) or 0.0) + if entry is None or sortino > entry.get("sortino_ratio", -1e9): + by_symbol[sym] = { + "symbol": sym, + "strategy": getattr(r, "strategy", ""), + "timeframe": getattr(r, "timeframe", interval or "1d"), + "metrics": { + "Sharpe Ratio": f"{float(getattr(r, 'sharpe_ratio', 0.0) or 0.0):.3f}", + "Sortino Ratio": 
f"{sortino:.3f}", + "Calmar Ratio": f"{float(getattr(r, 'calmar_ratio', 0.0) or 0.0):.3f}", + }, + } + + for sym, asset in by_symbol.items(): + alert = self.generate_tradingview_alert(asset) + if sym not in all_alerts: + all_alerts[sym] = [] + all_alerts[sym].append({"alert_message": alert, "asset_data": asset}) + + # Write to file if requested + if output_file is not None or collection_filter is not None: + organized_dir = self.organize_output_path("exports/tv_alerts") + now = datetime.now(timezone.utc) + year, q = self.get_quarter_from_date(now) + collection_name = collection_filter or "All_Collections" + if output_file and output_file not in ("tradingview_alerts.md",): + filename = ( + output_file if output_file.endswith(".md") else f"{output_file}.md" + ) else: - filename = f"{output_file}.md" - + filename = self._build_filename(collection_name, year, q, interval) output_path = organized_dir / filename with output_path.open("w", encoding="utf-8") as f: f.write("# TradingView Alert Messages\n\n") - for symbol, alerts in all_alerts.items(): f.write(f"## {symbol}\n\n") - for i, alert_data in enumerate(alerts): asset = alert_data["asset_data"] f.write( f"### Alert {i + 1} - {asset['strategy']} ({asset['timeframe']})\n" ) - f.write(f"**Source:** {asset['source_file']}\n\n") f.write("```\n") f.write(alert_data["alert_message"]) f.write("\n```\n\n") @@ -217,11 +335,15 @@ def main(): help="Output file for alerts (auto-organized by quarter/year if just filename)", ) parser.add_argument("--symbol", help="Export alerts for specific symbol only") + parser.add_argument( + "--collection", + help="Export alerts for specific collection/portfolio only (e.g., 'Commodities', 'Bonds')", + ) args = parser.parse_args() exporter = TradingViewAlertExporter(args.reports_dir) - alerts = exporter.export_alerts(args.output) + alerts = exporter.export_alerts(args.output, collection_filter=args.collection) print("\n📊 Export Summary:") print(f"Found {len(alerts)} assets with alerts") diff 
--git a/tailwind.config.js b/tailwind.config.js new file mode 100644 index 0000000..43a322a --- /dev/null +++ b/tailwind.config.js @@ -0,0 +1,12 @@ +/** @type {import('tailwindcss').Config} */ +module.exports = { + content: [ + './exports/reports/**/*.html', + './src/reporting/**/*.py', + ], + darkMode: 'class', + theme: { + extend: {}, + }, + plugins: [], +}; diff --git a/tests/cli/test_unified_cli_flags.py b/tests/cli/test_unified_cli_flags.py new file mode 100644 index 0000000..3b56a10 --- /dev/null +++ b/tests/cli/test_unified_cli_flags.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import json + +from src.cli.unified_cli import handle_collection_run + + +def test_no_cache_flag_sets_use_cache_false(tmp_path, monkeypatch, capsys): + # Create temp collection tree in a writable tmp directory + base = tmp_path / "config" / "collections" / "default" + base.mkdir(parents=True, exist_ok=True) + (base / "bonds_core.json").write_text( + json.dumps({"bonds_core": {"symbols": ["TLT"]}}) + ) + # Chdir so resolver finds temp config path + monkeypatch.chdir(tmp_path) + + # Use --dry-run so manifest prints to stdout + rc = handle_collection_run( + [ + "bonds_core", + "--action", + "direct", + "--interval", + "1d", + "--period", + "max", + "--no-cache", + "--dry-run", + ] + ) + assert rc == 0 + out = capsys.readouterr().out + manifest = json.loads(out) + assert manifest["plan"]["use_cache"] is False diff --git a/tests/cli/test_unified_cli_probe.py b/tests/cli/test_unified_cli_probe.py new file mode 100644 index 0000000..7bb09a6 --- /dev/null +++ b/tests/cli/test_unified_cli_probe.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import importlib +import json +import sys +from types import SimpleNamespace + + +def test_cli_probe_called(monkeypatch, tmp_path): + # Create temp collection + base = tmp_path / "config" / "collections" / "default" + base.mkdir(parents=True) + (base / "bonds_core.json").write_text( + json.dumps({"bonds_core": {"symbols": 
["TLT"]}}) + ) + monkeypatch.chdir(tmp_path) + + # Patch heavy dependencies in unified_cli + mod = importlib.import_module("src.cli.unified_cli") + + # Fake UnifiedDataManager with probe flag + class FakeDM: + def __init__(self): + self.called = False + + def probe_and_set_order( + self, asset_type, symbols, interval="1d", sample_size=5 + ): + self.called = True + return ["yahoo_finance"] + + fake_dm_instance = FakeDM() + # Inject a fake module so `from src.core.data_manager import UnifiedDataManager` returns our fake + fake_dm_module = SimpleNamespace(UnifiedDataManager=lambda: fake_dm_instance) + monkeypatch.setitem(sys.modules, "src.core.data_manager", fake_dm_module) + + # Patch direct backtest functions to no-op by injecting a fake module in sys.modules + fake_direct_mod = SimpleNamespace( + finalize_persistence_for_run=lambda *a, **k: None, + run_direct_backtest=lambda **k: {}, + ) + monkeypatch.setitem(sys.modules, "src.core.direct_backtest", fake_direct_mod) + + # Patch unified_models ensure_run_for_manifest + mod.unified_models = SimpleNamespace( # type: ignore[attr-defined] + ensure_run_for_manifest=lambda m: SimpleNamespace(run_id="test-run") + ) + + # Run without --dry-run so run_plan executes + rc = mod.handle_collection_run( + ["bonds_core", "--action", "direct", "--interval", "1d", "--period", "max"] + ) + assert rc == 0 + assert fake_dm_instance.called is True diff --git a/tests/cli/test_unified_cli_resolver.py b/tests/cli/test_unified_cli_resolver.py new file mode 100644 index 0000000..ce50fab --- /dev/null +++ b/tests/cli/test_unified_cli_resolver.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from src.cli.unified_cli import resolve_collection_path + + +def test_resolver_default_and_custom(tmp_path: Path, monkeypatch): + base = tmp_path / "config" / "collections" + (base / "default").mkdir(parents=True) + (base / "custom").mkdir(parents=True) + + # Create default and custom 
jsons + (base / "default" / "bonds_core.json").write_text( + json.dumps({"bonds_core": {"symbols": ["TLT"]}}) + ) + (base / "custom" / "stocks_traderfox_us_tech.json").write_text( + json.dumps({"stocks_traderfox_us_tech": {"symbols": ["AAPL"]}}) + ) + + # Chdir so resolver finds our structure + monkeypatch.chdir(tmp_path) + + # Alias mapping: bonds -> bonds_core + p1 = resolve_collection_path("bonds") + assert p1.name == "bonds_core.json" + assert p1.parent.name == "default" + + # Custom key resolves + p2 = resolve_collection_path("stocks_traderfox_us_tech") + assert p2.name == "stocks_traderfox_us_tech.json" + assert p2.parent.name == "custom" + + +def test_resolver_missing_raises(tmp_path: Path, monkeypatch): + (tmp_path / "config" / "collections").mkdir(parents=True) + monkeypatch.chdir(tmp_path) + with pytest.raises(FileNotFoundError): + resolve_collection_path("does_not_exist") diff --git a/tests/core/test_cache_manager_redis.py b/tests/core/test_cache_manager_redis.py new file mode 100644 index 0000000..0a6865e --- /dev/null +++ b/tests/core/test_cache_manager_redis.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +import pandas as pd + +from src.core.cache_manager import UnifiedCacheManager + + +def test_redis_helpers_no_redis_installed(monkeypatch): + cm = UnifiedCacheManager() + # Ensure client is None + cm.redis_client = None + assert cm.get_recent_overlay_from_redis("TLT", "1d") is None + # set should not raise + cm.set_recent_overlay_to_redis("TLT", "1d", pd.DataFrame()) diff --git a/tests/core/test_data_manager_freshness.py b/tests/core/test_data_manager_freshness.py new file mode 100644 index 0000000..9914f87 --- /dev/null +++ b/tests/core/test_data_manager_freshness.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +import logging +from types import SimpleNamespace + +import pandas as pd + +from src.core.data_manager import UnifiedDataManager + + +def test_freshness_warning_for_daily(monkeypatch, caplog): + 
caplog.set_level(logging.WARNING) + dm = UnifiedDataManager() + + # Fake source returning a stale last bar (two business days ago) + class FakeSource: + def __init__(self): + self.config = SimpleNamespace( + name="yahoo_finance", priority=1, asset_types=["stocks"] + ) + + def fetch_data(self, symbol, start_date, end_date, interval, **kwargs): + idx = pd.date_range("2023-01-01", periods=10, freq="D") + return pd.DataFrame({"open": 1, "high": 1, "low": 1, "close": 1}, index=idx) + + # Route only to our fake source + monkeypatch.setattr(dm, "_get_sources_for_asset_type", lambda at: [FakeSource()]) + + # Force fetch path (skip cache) so freshness check executes + df = dm.get_data( + "AAPL", "2000-01-01", "2100-01-01", "1d", use_cache=False, asset_type="stocks" + ) + assert df is not None + assert not df.empty + # Assert warning logged + assert any("seems stale" in rec.message for rec in caplog.records) diff --git a/tests/core/test_data_manager_probe.py b/tests/core/test_data_manager_probe.py new file mode 100644 index 0000000..98d9f10 --- /dev/null +++ b/tests/core/test_data_manager_probe.py @@ -0,0 +1,34 @@ +from __future__ import annotations + +from types import SimpleNamespace + +import pandas as pd + +from src.core.data_manager import UnifiedDataManager + + +def test_probe_and_set_order(monkeypatch): + dm = UnifiedDataManager() + + # Create two fake sources with differing coverage + class FakeSource: + def __init__(self, name, rows, start): + self.config = SimpleNamespace(name=name, priority=1, asset_types=["stocks"]) + self._rows = rows + self._start = start + + def fetch_data(self, symbol, start_date, end_date, interval, **kwargs): + if self._rows == 0: + return None + idx = pd.date_range(self._start, periods=self._rows, freq="D") + return pd.DataFrame({"open": 1, "high": 1, "low": 1, "close": 1}, index=idx) + + dm.sources = { + "yahoo": FakeSource("yahoo", rows=10, start="2020-01-01"), + "alt": FakeSource("alt", rows=20, start="2019-01-01"), + } + + ordered = 
dm.probe_and_set_order( + "stocks", ["AAPL", "MSFT"], interval="1d", sample_size=2 + ) + assert ordered[0] == "alt" # more rows and earlier start diff --git a/tests/core/test_data_manager_split_cache.py b/tests/core/test_data_manager_split_cache.py new file mode 100644 index 0000000..e5e0e7d --- /dev/null +++ b/tests/core/test_data_manager_split_cache.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +import pandas as pd + +from src.core.data_manager import UnifiedDataManager + + +def _df(dates, val): + idx = pd.to_datetime(dates) + return pd.DataFrame({"open": val, "high": val, "low": val, "close": val}, index=idx) + + +def test_split_cache_merge(monkeypatch): + dm = UnifiedDataManager() + + # Legacy fast-path must return None to exercise split layer merge + calls = {"legacy": 0} + + def fake_get_data( + symbol, start_date, end_date, interval, source=None, data_type=None + ): + calls["legacy"] += 1 + if data_type == "full": + return _df(["2023-01-01", "2023-01-10"], 1) + if data_type == "recent": + return _df(["2023-01-08", "2023-01-15"], 2) + return None + + monkeypatch.setattr(dm.cache_manager, "get_data", fake_get_data) + + df = dm.get_data("TLT", "2023-01-01", "2023-01-20", "1d", use_cache=True) + assert df is not None + assert not df.empty + # Last day should be 2023-01-15 given our recent overlay + assert df.index[-1].date().isoformat() == "2023-01-15" + # Overlap region should reflect recent overlay value (2) where both provide data + assert df.loc[pd.Timestamp("2023-01-08"), "close"] == 2 diff --git a/tests/core/test_portfolio_manager.py b/tests/core/test_portfolio_manager.py index 64b6209..42713c5 100644 --- a/tests/core/test_portfolio_manager.py +++ b/tests/core/test_portfolio_manager.py @@ -8,7 +8,7 @@ import pytest from src.core.backtest_engine import BacktestResult -from src.core.portfolio_manager import PortfolioManager +from src.core.collection_manager import PortfolioManager class TestPortfolioManager: diff --git 
a/tests/database/test_beststrategy_upsert.py b/tests/database/test_beststrategy_upsert.py new file mode 100644 index 0000000..e52acde --- /dev/null +++ b/tests/database/test_beststrategy_upsert.py @@ -0,0 +1,148 @@ +from __future__ import annotations + +from unittest.mock import patch + +from src.core import direct_backtest +from src.database import unified_models + + +def setup_module(module): + # Ensure sqlite tables exist for tests + unified_models.create_tables() + + +def test_beststrategy_upsert_from_run(tmp_path): + """ + Integration-style unit test that verifies run_strategy_comparison persists + BacktestResult rows and then upserts a canonical BestStrategy row based on + the configured target metric (sortino_ratio). + + Approach: + - Create a run via create_run_from_manifest to obtain run_id. + - Monkeypatch run_direct_backtest to return deterministic results for two strategies. + - Call run_strategy_comparison with persistence_context containing run_id and target_metric. + - Assert a BestStrategy row exists for the tested symbol/timeframe and matches the best metric. 
+ """ + # Create a run manifest and insert run row + manifest = { + "plan": { + "plan_hash": "test-plan-hash-beststrategy", + "actor": "test", + "action": "backtest", + "collection": "test_collection", + "strategies": ["adx", "macd"], + "intervals": ["1d"], + "metric": "sortino_ratio", + } + } + run = unified_models.create_run_from_manifest(manifest) + assert run is not None + run_id = run.run_id + + symbol = "TEST" + start = "2020-01-01" + end = "2020-12-31" + timeframe = "1d" + + # Prepare deterministic fake results for two strategies + def fake_run_direct_backtest( + symbol_arg, + strategy_name, + start_date, + end_date, + timeframe_arg, + initial_capital, + persistence_context=None, + ): + # strategy 'macd' is better (higher sortino) + if strategy_name == "adx": + metrics = {"sortino_ratio": 0.5, "num_trades": 1} + else: + metrics = {"sortino_ratio": 2.0, "num_trades": 2} + + # Simulate persistence side-effect similar to _persist_result_to_db so the later + # ranking/finalization code finds BacktestResult rows in the DB. 
+ try: + sess = unified_models.Session() + br = unified_models.BacktestResult( + run_id=(persistence_context or {}).get("run_id"), + symbol=symbol_arg, + strategy=strategy_name, + interval=timeframe_arg, + start_at_utc=start_date, + end_at_utc=end_date, + rank_in_symbol=None, + metrics=metrics, + engine_ctx={"summary": "ok"}, + trades_raw=None, + error=None, + ) + sess.add(br) + sess.flush() + sess.commit() + except Exception: + try: + sess.rollback() + except Exception: + pass + finally: + try: + sess.close() + except Exception: + pass + + return { + "symbol": symbol_arg, + "strategy": strategy_name, + "timeframe": timeframe_arg, + "error": None, + "metrics": metrics, + "trades": None, + "backtest_object": None, + "bt_results": {"summary": "ok"}, + "start_date": start_date, + "end_date": end_date, + } + + # Patch the run_direct_backtest used by run_strategy_comparison + with patch( + "src.core.direct_backtest.run_direct_backtest", + side_effect=fake_run_direct_backtest, + ): + out = direct_backtest.run_strategy_comparison( + symbol, + ["adx", "macd"], + start, + end, + timeframe, + initial_capital=10000.0, + persistence_context={"run_id": run_id, "target_metric": "sortino_ratio"}, + ) + + # Validate output contains best_strategy with macd + assert out["best_strategy"] is not None + assert ( + out["best_strategy"]["strategy"] == "macd" + or out["best_strategy"]["strategy"] == "MACD" + or out["best_strategy"]["strategy"].lower() == "macd" + ) + + # Now verify BestStrategy upsert exists in unified_models + sess = unified_models.Session() + try: + bs = ( + sess.query(unified_models.BestStrategy) + .filter_by(symbol=symbol, timeframe=timeframe) + .one_or_none() + ) + assert bs is not None, "BestStrategy was not upserted into the DB" + assert bs.strategy.lower() == "macd" + # Check sortino value was recorded (numeric-ish) + try: + val = float(bs.sortino_ratio) + assert val >= 2.0 + except Exception: + # If stored as JSON/text, still ensure the string contains '2' + 
assert "2" in str(bs.sortino_ratio) + finally: + sess.close() diff --git a/tests/scripts/test_data_health_report.py b/tests/scripts/test_data_health_report.py new file mode 100644 index 0000000..cc5249e --- /dev/null +++ b/tests/scripts/test_data_health_report.py @@ -0,0 +1,52 @@ +from __future__ import annotations + +import csv +import importlib.util +from pathlib import Path +from unittest.mock import patch + +import pandas as pd + + +def _load_module(): + p = Path("scripts/data_health_report.py") + spec = importlib.util.spec_from_file_location("data_health_report", p) + assert spec is not None + assert spec.loader is not None + mod = importlib.util.module_from_spec(spec) + assert mod is not None + spec.loader.exec_module(mod) + return mod + + +def test_health_report_outputs_csv(monkeypatch, tmp_path): + mod = _load_module() + + # Build fake DF + idx = pd.date_range("2023-01-01", periods=10, freq="D") + df = pd.DataFrame({"open": 1, "high": 1, "low": 1, "close": 1}, index=idx) + fake_dm = patch.object(mod, "UnifiedDataManager").start().return_value + fake_dm.get_data.return_value = df + + # Create a simple collection file + coll_dir = tmp_path / "config/collections/default" + coll_dir.mkdir(parents=True, exist_ok=True) + f = coll_dir / "bonds_core.json" + f.write_text('{"bonds_core": {"symbols": ["TLT", "IEF"]}}') + # Ensure resolver reads from tmp config + import os as _os + + old_cwd = Path.cwd() + _os.chdir(str(tmp_path)) + + out_csv = tmp_path / "health.csv" + rc = mod.main( + ["bonds_core", "--interval", "1d", "--period", "max", "--out", str(out_csv)] + ) + assert rc == 0 + assert out_csv.exists() + rows = list(csv.DictReader(out_csv.open())) + # two symbols + assert len(rows) == 2 + patch.stopall() + _os.chdir(str(old_cwd)) diff --git a/tests/scripts/test_prefetch_all.py b/tests/scripts/test_prefetch_all.py new file mode 100644 index 0000000..539a125 --- /dev/null +++ b/tests/scripts/test_prefetch_all.py @@ -0,0 +1,39 @@ +from __future__ import annotations 
+ +import importlib.util +from pathlib import Path +from unittest.mock import patch + + +def _load_module(): + p = Path("scripts/prefetch_all.py") + spec = importlib.util.spec_from_file_location("prefetch_all", p) + assert spec is not None + assert spec.loader is not None + mod = importlib.util.module_from_spec(spec) + assert mod is not None + spec.loader.exec_module(mod) + return mod + + +def test_prefetch_all_calls(monkeypatch): + mod = _load_module() + # Patch prefetch_one inside loaded module + mock_pf = patch.object(mod, "prefetch_one").start() + try: + rc = mod.main( + [ + "bonds_core", + "indices_global_core", + "--mode", + "recent", + "--interval", + "1d", + "--recent-days", + "30", + ] + ) + assert rc == 0 + assert mock_pf.call_count == 2 + finally: + patch.stopall() diff --git a/tests/scripts/test_prefetch_collection.py b/tests/scripts/test_prefetch_collection.py new file mode 100644 index 0000000..4e272d8 --- /dev/null +++ b/tests/scripts/test_prefetch_collection.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +import importlib.util +from pathlib import Path +from unittest.mock import MagicMock + +_path = Path("scripts/prefetch_collection.py") +_spec = importlib.util.spec_from_file_location("prefetch_collection", _path) +assert _spec is not None +assert _spec.loader is not None +prefetch_mod = importlib.util.module_from_spec(_spec) +assert prefetch_mod is not None +_spec.loader.exec_module(prefetch_mod) +prefetch = prefetch_mod.prefetch + + +def test_prefetch_full(monkeypatch): + fake_dm = MagicMock() + monkeypatch.setattr( + prefetch_mod, "UnifiedDataManager", MagicMock(return_value=fake_dm) + ) + prefetch("bonds_core", mode="full", interval="1d", recent_days=90) + dm = fake_dm + # full: period='max', use_cache=False + dm.get_batch_data.assert_called_once() + args, kwargs = dm.get_batch_data.call_args + assert kwargs.get("use_cache") is False + assert kwargs.get("period") == "max" + + +def test_prefetch_recent(monkeypatch): + fake_dm = 
MagicMock() + monkeypatch.setattr( + prefetch_mod, "UnifiedDataManager", MagicMock(return_value=fake_dm) + ) + prefetch("bonds_core", mode="recent", interval="1d", recent_days=90) + dm = fake_dm + dm.get_batch_data.assert_called_once() + args, kwargs = dm.get_batch_data.call_args + assert kwargs.get("use_cache") is False + # recent should not set provider period + assert kwargs.get("period") is None diff --git a/tests/utils/test_csv_exporter_paths.py b/tests/utils/test_csv_exporter_paths.py new file mode 100644 index 0000000..5942910 --- /dev/null +++ b/tests/utils/test_csv_exporter_paths.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from src.utils.csv_exporter import RawDataCSVExporter + + +def test_csv_exporter_default_output_dir(): + exp = RawDataCSVExporter() + assert str(exp.output_dir).endswith("exports/csv") diff --git a/tests/utils/test_tradingview_alert_exporter.py b/tests/utils/test_tradingview_alert_exporter.py index 2b98358..c6c07d0 100644 --- a/tests/utils/test_tradingview_alert_exporter.py +++ b/tests/utils/test_tradingview_alert_exporter.py @@ -6,7 +6,7 @@ from pathlib import Path from unittest.mock import mock_open, patch -from src.utils.tradingview_alert_exporter import TradingViewAlertExporter +from src.utils.tv_alert_exporter import TradingViewAlertExporter class TestTradingViewAlertExporter: @@ -39,12 +39,12 @@ def test_get_quarter_from_date(self): assert year == 2023 assert quarter == 2 - @patch("src.utils.tradingview_alert_exporter.Path.mkdir") + @patch("src.utils.tv_alert_exporter.Path.mkdir") def test_organize_output_path(self, mock_mkdir): """Test organized output path creation.""" exporter = TradingViewAlertExporter() - with patch("src.utils.tradingview_alert_exporter.datetime") as mock_datetime: + with patch("src.utils.tv_alert_exporter.datetime") as mock_datetime: mock_datetime.now.return_value = datetime(2023, 6, 15) mock_datetime.timezone.utc = datetime.now().tzinfo @@ -91,7 +91,7 @@ def test_generate_alert_message(self): 
assert "1D" in alert @patch("builtins.open", new_callable=mock_open, read_data="") - @patch("src.utils.tradingview_alert_exporter.Path.exists") + @patch("src.utils.tv_alert_exporter.Path.exists") def test_process_html_file(self, mock_exists, mock_file): """Test HTML file processing.""" mock_exists.return_value = True diff --git a/tools/diagnose_cache.py b/tools/diagnose_cache.py new file mode 100644 index 0000000..f626791 --- /dev/null +++ b/tools/diagnose_cache.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +""" +Diagnostic helper to inspect UnifiedCacheManager stats and recent cache entries. +Run inside project root (or inside Docker) to get quick visibility into cache state. +""" + +from __future__ import annotations + +import json + +from src.core.cache_manager import UnifiedCacheManager + + +def main(): + cm = UnifiedCacheManager() + stats = cm.get_cache_stats() + print("Cache stats:") + print(json.dumps(stats, indent=2)) + + # List recent data cache entries + print("\nRecent data cache entries (up to 20):") + entries = cm._find_entries("data") # internal helper + entries_sorted = sorted(entries, key=lambda e: e.last_accessed, reverse=True) + for e in entries_sorted[:20]: + print( + f"- key={e.key} source={e.source} symbol={e.symbol} interval={e.interval} created_at={e.created_at.isoformat()} size_bytes={e.size_bytes}" + ) + + +if __name__ == "__main__": + main()